lops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
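
/**
 * glock_lo_add - Add a glock to the transaction's list of dirty glocks
 * @sdp: the filesystem
 * @le: the log element embedded in the glock
 *
 * The glock must be held exclusively. A reference is taken here and
 * dropped again in glock_lo_after_commit() once the transaction has
 * hit the log.
 */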
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}
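
/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the bufdata
 *
 * The buffer is queued on the transaction's list, pinned in memory,
 * and added to the superblock's list of log elements for the next
 * commit.
 */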
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}
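
/**
 * buf_lo_before_commit - Write the pinned metadata buffers into the log
 * @sdp: the filesystem
 *
 * Each log descriptor block holds up to limit block-number tags
 * (503 for 4k blocks); the tagged buffers themselves follow the
 * descriptor in the log.
 */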
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
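
/**
 * buf_lo_scan_elements - Replay the metadata blocks named by a descriptor
 * @jd: the journal being recovered
 * @start: the journal block holding the descriptor
 * @ld: the descriptor itself
 * @ptr: the array of block-number tags following the descriptor
 * @pass: recovery runs in two passes; metadata is replayed in pass 1
 *
 * Blocks with a matching revoke entry are skipped rather than replayed.
 */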
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}
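
/**
 * revoke_lo_before_commit - Write the pending revokes into the log
 * @sdp: the filesystem
 *
 * Revokes are packed as an array of block numbers, starting right
 * after the log descriptor and continuing into GFS2_METATYPE_LB
 * continuation blocks as needed.
 */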
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be16(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be16(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}
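
/**
 * revoke_lo_scan_elements - Collect revoke tags during recovery pass 0
 * @jd: the journal being recovered
 * @start: the journal block holding the descriptor
 * @ld: the descriptor itself
 * @ptr: unused here; the revokes are read from the blocks directly
 * @pass: revokes must be gathered in pass 0, before any replay happens
 */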
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;

			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}
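
/**
 * rg_lo_add - Add a dirty resource group to the transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the rgrpd
 *
 * A reference on the rgrp's bitmap buffers is taken via
 * gfs2_rgrp_bh_hold() and dropped again in rg_lo_after_commit().
 */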
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the bufdata
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time.
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here the tag is two
 *    __be64s: the block number (as for metadata) and a flag which says
 *    whether the data block needs escaping or not. This means we need
 *    a new log descriptor for each 251 or so data blocks, which isn't
 *    an enormous overhead but twice as much as for normal metadata
 *    blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = get_transaction;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = get_v2ip(mapping->host);

	tr->tr_touched = 1;
	/* Queue journaled data buffers on the transaction, but only once */
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
	}
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}
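
/**
 * gfs2_check_magic - Check whether a data block collides with GFS2_MAGIC
 * @bh: the data buffer to test
 *
 * Returns 1 if the first four bytes of the block match the magic
 * number, in which case the block must be escaped before it is
 * written into the log.
 */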
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: the filesystem
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	/* Each tag is a pair of __be64s: block number plus escape flag,
	   so limit is half that of plain metadata (251 for 4k blocks) */
	limit = (sdp->sd_sb.sb_bsize - offset)/(2*sizeof(__be64));

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bd1->bd_bh) {
					get_bh(bd1->bd_bh);
					if (buffer_dirty(bd1->bd_bh)) {
						gfs2_log_unlock(sdp);
						wait_on_buffer(bd1->bd_bh);
						ll_rw_block(WRITE, 1,
							    &bd1->bd_bh);
						gfs2_log_lock(sdp);
					}
					brelse(bd1->bd_bh);
					continue;
				}
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)
					     bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be16(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be16(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (++n >= num)
					break;
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			set_v2bd(bh, NULL);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kfree(bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}
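
/**
 * databuf_lo_scan_elements - Replay journaled data blocks
 * @jd: the journal being recovered
 * @start: the journal block holding the descriptor
 * @ld: the descriptor itself
 * @ptr: the array of (block number, escape flag) tag pairs
 * @pass: data blocks are replayed in pass 1, like metadata
 *
 * Escaped blocks have their first four bytes restored to GFS2_MAGIC.
 */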
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock"
};

struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf"
};

struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke"
};

struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg"
};

struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf"
};
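
/* NULL-terminated master table of log operations, one entry per
   log element type, so callers can iterate every type in turn */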
struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL
};