/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

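/**
 * glock_lo_add - Add a glock to the current transaction's glock list
 * @sdp: the filesystem
 * @le: the log element embedded in the glock
 *
 * The glock must be held exclusively. An extra reference is taken and
 * GLF_DIRTY is set; the reference is dropped in glock_lo_after_commit().
 */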
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);
	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

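/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the gfs2_bufdata
 *
 * The buffer is added to the transaction's buffer list and, the first
 * time it is seen, pinned in memory until buf_lo_after_commit() unpins it.
 */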
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr)) {
		gfs2_log_unlock(sdp);
		return;
	}
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	gfs2_log_unlock(sdp);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

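/**
 * buf_lo_before_commit - Write the pinned metadata buffers into the log
 * @sdp: the filesystem
 *
 * A GFS2_LOG_DESC_METADATA descriptor block holding up to "limit" block
 * numbers is written first, followed by a copy of each metadata buffer
 * it describes.
 */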
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	offset += sizeof(__be64) - 1;
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		sdp->sd_log_num_hdrs++;
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

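/**
 * buf_lo_scan_elements - Replay the metadata blocks named in a log descriptor
 * @jd: the journal being replayed
 * @start: the first journal block of the descriptor
 * @ld: the log descriptor
 * @ptr: the array of block numbers following the descriptor header
 * @pass: the recovery pass (metadata is replayed on pass 1)
 *
 * Each journaled copy is read from the log and, unless the block has
 * been revoked, copied over the real block and marked dirty.
 */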
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

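/**
 * revoke_lo_before_commit - Write all outstanding revokes into the log
 * @sdp: the filesystem
 *
 * The revoked block numbers are packed into a GFS2_LOG_DESC_REVOKE
 * descriptor block, spilling over into GFS2_METATYPE_LB continuation
 * blocks whenever a block fills up.
 */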
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(u64);
	}

	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

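/**
 * revoke_lo_scan_elements - Collect revoke tags during recovery pass 0
 * @jd: the journal being replayed
 * @start: the first journal block of the descriptor
 * @ld: the log descriptor
 * @ptr: unused here; the tags are read out of the log blocks themselves
 * @pass: the recovery pass (revokes are gathered on pass 0)
 *
 * Each revoked block number is added to the revoke list so that later
 * passes skip replaying stale copies of those blocks.
 */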
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);

	gfs2_rgrp_bh_hold(rgd);
	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it is
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	gfs2_log_lock(sdp);
	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_log_unlock(sdp);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_buf_new++;
	} else {
		gfs2_log_unlock(sdp);
	}
	gfs2_trans_add_gl(bd->bd_gl);
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}

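/**
 * gfs2_check_magic - Test whether a data block needs escaping
 * @bh: the data buffer
 *
 * Returns 1 if the first four bytes of the buffer match GFS2_MAGIC, in
 * which case the block must be escaped before being written to the log.
 */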
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that has been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL,*bh1 = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += 2*sizeof(__be64) - 1;
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while(total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* store off the buffer head in a local ptr since
			 * gfs2_bufdata might change when we drop the log lock
			 */
			bh1 = bd1->bd_bh;

			/* An ordered write buffer */
			if (bh1 && !buffer_pinned(bh1)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bh1) {
					if (buffer_dirty(bh1)) {
						get_bh(bh1);

						gfs2_log_unlock(sdp);

						ll_rw_block(SWRITE, 1, &bh1);
						brelse(bh1);

						gfs2_log_lock(sdp);
					}
					continue;
				}
				continue;
			} else if (bh1) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					sdp->sd_log_num_hdrs++;
					ld = (struct gfs2_log_descriptor *)
					     bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be32(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be32(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bh1);
				*ptr++ = cpu_to_be64(bh1->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bh1);
				if (unlikely(magic != 0))
					set_buffer_escaped(bh1);
				gfs2_log_lock(sdp);
				if (n++ > num)
					break;
			} else if (!bh1) {
				total_dbuf--;
				sdp->sd_log_num_databuf--;
				list_del_init(&bd1->bd_le.le_list);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				kmem_cache_free(gfs2_bufdata_cachep, bd1);
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del_init(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		bh = bd1->bd_bh;
		if (bh) {
			bh->b_private = NULL;
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kmem_cache_free(gfs2_bufdata_cachep, bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

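/**
 * databuf_lo_scan_elements - Replay journaled data blocks
 * @jd: the journal being replayed
 * @start: the first journal block of the descriptor
 * @ld: the log descriptor
 * @ptr: the (block number, escape flag) tag pairs following the header
 * @pass: the recovery pass (data is replayed on pass 1)
 *
 * Works like buf_lo_scan_elements(), except that each tag carries an
 * escape flag; if set, the original GFS2_MAGIC value is restored to the
 * start of the block before it is written back.
 */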
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

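/*
 * The per-type log operation vtables. gfs2_log_ops[] at the bottom is the
 * NULL-terminated list used to iterate over every log element type.
 */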
const struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL,
};