lops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to in-place disk block, remove it from the AIL. */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

/*
 * maybe_release_space - resync a resource group's bitmap clone
 *
 * If the resource group's bitmap has a clone, copy the now-written
 * bitmap into the clone so that freed blocks become allocatable again
 * (rd_free_clone = rd_free), issuing discards for the freed ranges
 * first when the filesystem is mounted with the "discard" option.
 */
static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_rgrpd *rgd = gl->gl_object;
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == NULL)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL entry that the buffer will be placed on
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

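/*
 * A rough sketch of the pin/unpin lifecycle, as implemented above:
 *
 *   gfs2_pin(sdp, bh);        (buffer joins a transaction, see buf_lo_add())
 *   ... log flush writes bh, or a fake alias of it, to the journal ...
 *   gfs2_unpin(sdp, bh, ai);  (bh marked dirty, placed on ai->ai_ail1_list)
 *   ... writeback copies bh to its in-place location ...
 *   ... the AIL code then moves bh to ai_ail2_list and releases it ...
 *
 * A buffer that is pinned again while still on an AIL list is moved
 * straight to ai_ail2_list by gfs2_pin(), since the new transaction's
 * log copy supersedes the old one.
 */
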
static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
        return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
        struct gfs2_log_descriptor *ld = bh_log_desc(bh);
        return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
        return (__force __be64 *)(bh->b_data + bh->b_size);
}

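/*
 * The helpers above view a log descriptor block as a struct
 * gfs2_log_descriptor immediately followed by an array of big-endian
 * block numbers. Sketch of one such block, assuming 4KiB blocks and the
 * 72-byte on-disk descriptor from <linux/gfs2_ondisk.h>:
 *
 *   offset 0                     72                                4096
 *          +---------------------+----------------------------------+
 *          | gfs2_log_descriptor | __be64 blkno[0] ... blkno[502]   |
 *          +---------------------+----------------------------------+
 *
 * (4096 - 72) / sizeof(__be64) = 503 entries, which is where the
 * "limit = 503" comment in buf_lo_before_commit() comes from.
 */
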
/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */
static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
        struct gfs2_sbd *sdp = bh->b_private;

        bh->b_private = NULL;

        end_buffer_write_sync(bh, uptodate);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */
static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        gfs2_log_incr_head(sdp);
        atomic_inc(&sdp->sd_log_in_flight);
        bh->b_private = sdp;
        bh->b_end_io = gfs2_log_write_endio;

        return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake log buffer
 * @bh: The (fake) buffer head
 * @uptodate: The I/O Status
 *
 */
static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
        struct buffer_head *real_bh = bh->b_private;
        struct gfs2_bufdata *bd = real_bh->b_private;
        struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

        end_buffer_write_sync(bh, uptodate);
        mempool_free(bh, gfs2_bh_pool);
        unlock_buffer(real_bh);
        brelse(real_bh);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer head whose data the fake buffer should point to
 *
 * Returns: the fake buffer head
 */
static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                             struct buffer_head *real)
{
        u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;

        bh = mempool_alloc(gfs2_bh_pool, GFP_NOFS);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
        set_bh_page(bh, real->b_page, bh_offset(real));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;
        bh->b_private = real;
        bh->b_end_io = gfs2_fake_write_endio;

        gfs2_log_incr_head(sdp);
        atomic_inc(&sdp->sd_log_in_flight);

        return bh;
}

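/*
 * Design note: the fake buffer head aliases the same page as the real
 * metadata buffer but carries the journal block number, so the data can
 * be written to the log without an intermediate copy. The fake bh
 * references the real one via b_private until gfs2_fake_write_endio()
 * unlocks and releases it.
 */
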
static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
        struct buffer_head *bh = gfs2_log_get_buf(sdp);
        struct gfs2_log_descriptor *ld = bh_log_desc(bh);

        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = 0;
        ld->ld_data1 = 0;
        ld->ld_data2 = 0;
        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
        return bh;
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_meta_header *mh;
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr))
                goto out;
        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_buf++;
        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        if (!list_empty(&le->le_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
        mh->__pad0 = cpu_to_be64(0);
        mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        sdp->sd_log_num_buf++;
        list_add(&le->le_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct buffer_head *bh;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        unsigned int total;
        unsigned int limit;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        limit = buf_limit(sdp);
        /* for 4k blocks, limit = 503 */

        gfs2_log_lock(sdp);
        total = sdp->sd_log_num_buf;
        bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
        while (total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
                gfs2_log_lock(sdp);
                ld = bh_log_desc(bh);
                ptr = bh_log_ptr(bh);
                ld->ld_length = cpu_to_be32(num + 1);
                ld->ld_data1 = cpu_to_be32(num);

                n = 0;
                list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                submit_bh(WRITE_SYNC, bh);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);
                        bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        submit_bh(WRITE_SYNC, bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

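/*
 * The resulting on-disk layout of each chunk written above (for
 * num <= limit blocks) is:
 *
 *   [ descriptor block: GFS2_LOG_DESC_METADATA, ld_data1 = num,
 *     followed by num __be64 in-place block numbers ]
 *   [ metadata block 1 ] ... [ metadata block num ]
 *
 * ld_length = num + 1 counts the descriptor block itself plus the num
 * metadata blocks; buf_lo_scan_elements() walks the same layout during
 * journal replay.
 */
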
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

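/*
 * Background: a revoke records that any earlier journal copies of a
 * block must not be replayed, typically because the block has since
 * been freed (and possibly reused for ordinary data). On recovery,
 * revoke_lo_scan_elements() collects revokes during pass 0 so that
 * buf_lo_scan_elements() and databuf_lo_scan_elements() can skip
 * revoked blocks via gfs2_revoke_check() during pass 1.
 */
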
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        struct buffer_head *bh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;

        if (!sdp->sd_log_num_revoke)
                return;

        bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
        ld = bh_log_desc(bh);
        ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
                                                    sizeof(u64)));
        ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_le.le_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        submit_bh(WRITE_SYNC, bh);

                        bh = gfs2_log_get_buf(sdp);
                        mh = (struct gfs2_meta_header *)bh->b_data;
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        submit_bh(WRITE_SYNC, bh);
}

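/*
 * Worked example, under the same 4KiB-block assumption as above: the
 * descriptor block has room for (4096 - 72) / sizeof(u64) = 503 revoke
 * entries, and each GFS2_METATYPE_LB continuation block for
 * (4096 - 24) / sizeof(u64) = 509 more (the meta header is 24 bytes).
 * gfs2_struct2blk() performs the matching block-count calculation when
 * ld_length is filled in above.
 */
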
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        } else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it is
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (tr) {
                if (!list_empty(&bd->bd_list_tr))
                        goto out;
                tr->tr_touched = 1;
                if (gfs2_is_jdata(ip)) {
                        tr->tr_num_buf++;
                        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
                }
        }
        if (!list_empty(&le->le_list))
                goto out;

        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_databuf++;
                list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
        } else {
                list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
        }
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

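/*
 * Worked example for the tag size mentioned in the databuf_lo_add()
 * comment above: with 4KiB blocks, each tag is two __be64s (16 bytes),
 * so one descriptor block holds (4096 - 72) / 16 = 251 tags (integer
 * division), hence "251 or so". databuf_lo_before_commit() enforces
 * this by stopping one __be64 short of the end of the descriptor block
 * (end = bh_ptr_end(bh) - 1).
 */
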
static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

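/*
 * Escaping, in short: a journaled data block whose first four bytes
 * happen to equal GFS2_MAGIC would look like metadata to journal
 * replay. gfs2_check_magic() flags such blocks; gfs2_write_blocks()
 * then zeroes those four bytes in the log copy, the tag's second
 * __be64 is set to 1 in databuf_lo_before_commit(), and
 * databuf_lo_scan_elements() restores the magic on replay.
 */
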
static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
                              struct list_head *list, struct list_head *done,
                              unsigned int n)
{
        struct buffer_head *bh1;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd;
        __be64 *ptr;

        if (!bh)
                return;

        ld = bh_log_desc(bh);
        ld->ld_length = cpu_to_be32(n + 1);
        ld->ld_data1 = cpu_to_be32(n);

        ptr = bh_log_ptr(bh);

        get_bh(bh);
        submit_bh(WRITE_SYNC, bh);
        gfs2_log_lock(sdp);
        while (!list_empty(list)) {
                bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
                list_move_tail(&bd->bd_le.le_list, done);
                get_bh(bd->bd_bh);
                while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
                        gfs2_log_incr_head(sdp);
                        ptr += 2;
                }
                gfs2_log_unlock(sdp);
                lock_buffer(bd->bd_bh);
                if (buffer_escaped(bd->bd_bh)) {
                        void *kaddr;
                        bh1 = gfs2_log_get_buf(sdp);
                        kaddr = kmap_atomic(bd->bd_bh->b_page);
                        memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
                               bh1->b_size);
                        kunmap_atomic(kaddr);
                        *(__be32 *)bh1->b_data = 0;
                        clear_buffer_escaped(bd->bd_bh);
                        unlock_buffer(bd->bd_bh);
                        brelse(bd->bd_bh);
                } else {
                        bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
                }
                submit_bh(WRITE_SYNC, bh1);
                gfs2_log_lock(sdp);
                ptr += 2;
        }
        gfs2_log_unlock(sdp);
        brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd = NULL;
        struct buffer_head *bh = NULL;
        unsigned int n = 0;
        __be64 *ptr = NULL, *end = NULL;
        LIST_HEAD(processed);
        LIST_HEAD(in_progress);

        gfs2_log_lock(sdp);
        while (!list_empty(&sdp->sd_log_le_databuf)) {
                if (ptr == end) {
                        gfs2_log_unlock(sdp);
                        gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
                        n = 0;
                        bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
                        ptr = bh_log_ptr(bh);
                        end = bh_ptr_end(bh) - 1;
                        gfs2_log_lock(sdp);
                        continue;
                }
                bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
                list_move_tail(&bd->bd_le.le_list, &in_progress);
                gfs2_check_magic(bd->bd_bh);
                *ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
                /*
                 * The escape flag belongs to the data buffer (bd->bd_bh);
                 * the descriptor bh is never escaped.
                 */
                *ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
                n++;
        }
        gfs2_log_unlock(sdp);
        gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
        gfs2_log_lock(sdp);
        list_splice(&processed, &sdp->sd_log_le_databuf);
        gfs2_log_unlock(sdp);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};

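/*
 * Note: gfs2_log_ops is a NULL-terminated array walked in order by the
 * lops_*() wrappers (see lops.h), so data buffers are processed ahead
 * of metadata buffers at commit time. gfs2_rg_lops defines only a name;
 * resource group buffers are journaled through gfs2_buf_lops and the
 * "rg" entry appears to remain as a placeholder.
 */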