lops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

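/**
 * glock_lo_add - Add a glock to the transaction's list of dirty glocks
 *
 * The glock must be held in the exclusive state. The first time the
 * glock is added we take a reference on it and set GLF_DIRTY, so that
 * it stays around until the commit has gone to disk; duplicate adds
 * are detected by the log element already being on a list.
 */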
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_glock *gl;
        struct gfs2_trans *tr = current->journal_info;

        tr->tr_touched = 1;

        gl = container_of(le, struct gfs2_glock, gl_le);
        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
                return;

        gfs2_log_lock(sdp);
        if (!list_empty(&le->le_list)) {
                gfs2_log_unlock(sdp);
                return;
        }
        gfs2_glock_hold(gl);
        set_bit(GLF_DIRTY, &gl->gl_flags);
        sdp->sd_log_num_gl++;
        list_add(&le->le_list, &sdp->sd_log_le_gl);
        gfs2_log_unlock(sdp);
}

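/**
 * glock_lo_after_commit - Release the glocks queued by glock_lo_add()
 *
 * Called once the commit has gone to the log; drops the reference
 * taken in glock_lo_add() for each glock on the list.
 */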
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_gl;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
                list_del_init(&gl->gl_le.le_list);
                sdp->sd_log_num_gl--;

                gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
                gfs2_glock_put(gl);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

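/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 *
 * The buffer goes on the transaction's own list for the in-core
 * commit and, the first time it is seen, is pinned in memory and
 * added to the log's list of metadata buffers awaiting the next
 * log flush.
 */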
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr;

        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr)) {
                gfs2_log_unlock(sdp);
                return;
        }
        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_buf++;
        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        gfs2_log_unlock(sdp);

        if (!list_empty(&le->le_list))
                return;

        gfs2_trans_add_gl(bd->bd_gl);

        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        gfs2_log_lock(sdp);
        sdp->sd_log_num_buf++;
        list_add(&le->le_list, &sdp->sd_log_le_buf);
        gfs2_log_unlock(sdp);

        tr->tr_num_buf_new++;
}

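/**
 * buf_lo_incore_commit - Drain the transaction's list of buffers
 *
 * Empties the per-transaction list built up by buf_lo_add(); the
 * buffers themselves remain on sd_log_le_buf until the log flush.
 */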
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_list_buf;
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
                list_del_init(&bd->bd_list_tr);
                tr->tr_num_buf--;
        }
        gfs2_log_unlock(sdp);
        gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

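/**
 * buf_lo_before_commit - Write the pinned metadata buffers into the log
 *
 * The buffers are written in chunks of up to buf_limit() blocks, each
 * chunk preceded by a log descriptor listing the real (in-place) block
 * number of every buffer in the chunk; that list is what recovery uses
 * to put the blocks back where they belong.
 */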
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct buffer_head *bh;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        unsigned int total;
        unsigned int offset = BUF_OFFSET;
        unsigned int limit;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        limit = buf_limit(sdp); /* for 4k blocks, limit = 503 */

        gfs2_log_lock(sdp);
        total = sdp->sd_log_num_buf;
        bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
        while (total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                bh = gfs2_log_get_buf(sdp);
                gfs2_log_lock(sdp);
                ld = (struct gfs2_log_descriptor *)bh->b_data;
                ptr = (__be64 *)(bh->b_data + offset);
                ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
                ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
                ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
                ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
                ld->ld_length = cpu_to_be32(num + 1);
                ld->ld_data1 = cpu_to_be32(num);
                ld->ld_data2 = cpu_to_be32(0);
                memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

                n = 0;
                list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                set_buffer_dirty(bh);
                ll_rw_block(WRITE, 1, &bh);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        gfs2_log_unlock(sdp);
                        bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

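/**
 * buf_lo_after_commit - Unpin the metadata buffers after a log flush
 *
 * Once the log is safely on disk, the buffers can be unpinned and
 * left on the AIL to be written back in place later.
 */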
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

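/**
 * buf_lo_scan_elements - Replay journaled metadata during recovery
 *
 * On pass 1, each block listed in a metadata log descriptor is read
 * from the journal and, unless a matching revoke was found on pass 0,
 * copied over the in-place block and marked dirty.
 */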
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

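/**
 * revoke_lo_add - Add a revoke to the current transaction
 *
 * A revoke records that any earlier journal copies of a block are
 * stale and must not be replayed during recovery.
 */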
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;

        gfs2_log_lock(sdp);
        sdp->sd_log_num_revoke++;
        list_add(&le->le_list, &sdp->sd_log_le_revoke);
        gfs2_log_unlock(sdp);
}

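/**
 * revoke_lo_before_commit - Write the pending revokes into the log
 *
 * The revokes are written as a packed array of 64-bit block numbers,
 * starting after a log descriptor header and spilling over into
 * continuation blocks (GFS2_METATYPE_LB) as required.
 */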
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        struct buffer_head *bh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_revoke *rv;

        if (!sdp->sd_log_num_revoke)
                return;

        bh = gfs2_log_get_buf(sdp);
        ld = (struct gfs2_log_descriptor *)bh->b_data;
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
        ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
                                                    sizeof(u64)));
        ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
        ld->ld_data2 = cpu_to_be32(0);
        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
        offset = sizeof(struct gfs2_log_descriptor);

        while (!list_empty(head)) {
                rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
                list_del_init(&rv->rv_le.le_list);
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);

                        bh = gfs2_log_get_buf(sdp);
                        mh = (struct gfs2_meta_header *)bh->b_data;
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
                kfree(rv);

                offset += sizeof(u64);
        }

        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        set_buffer_dirty(bh);
        ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

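/**
 * revoke_lo_scan_elements - Collect revoke tags during recovery
 *
 * On pass 0, every block number found in a revoke descriptor (and its
 * continuation blocks) is added to the revoke table, so that pass 1
 * can skip replaying stale journal copies of those blocks.
 */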
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0)
                                return error;
                        else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

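/**
 * rg_lo_add - Add a resource group to the current transaction
 *
 * Takes a hold on the rgrp's bitmap buffers for the life of the
 * transaction; rg_lo_after_commit() repolishes the clone bitmaps
 * and drops the hold again.
 */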
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_trans *tr = current->journal_info;

        tr->tr_touched = 1;

        rgd = container_of(le, struct gfs2_rgrpd, rd_le);

        gfs2_log_lock(sdp);
        if (!list_empty(&le->le_list)) {
                gfs2_log_unlock(sdp);
                return;
        }
        gfs2_rgrp_bh_hold(rgd);
        sdp->sd_log_num_rg++;
        list_add(&le->le_list, &sdp->sd_log_le_rg);
        gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_rg;
        struct gfs2_rgrpd *rgd;

        while (!list_empty(head)) {
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
                list_del_init(&rgd->rd_le.le_list);
                sdp->sd_log_num_rg--;

                gfs2_rgrp_repolish_clones(rgd);
                gfs2_rgrp_bh_put(rgd);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time.
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr)) {
                gfs2_log_unlock(sdp);
                return;
        }
        tr->tr_touched = 1;
        if (gfs2_is_jdata(ip)) {
                tr->tr_num_buf++;
                list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        }
        gfs2_log_unlock(sdp);

        if (!list_empty(&le->le_list))
                return;

        gfs2_trans_add_gl(bd->bd_gl);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
        }
        gfs2_log_lock(sdp);
        if (gfs2_is_jdata(ip))
                sdp->sd_log_num_jdata++;
        sdp->sd_log_num_databuf++;
        list_add(&le->le_list, &sdp->sd_log_le_databuf);
        gfs2_log_unlock(sdp);
}

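/**
 * gfs2_check_magic - Test whether a data block begins with GFS2_MAGIC
 *
 * Returns 1 if it does. Such a block must be "escaped" when journaled
 * (the first word of the log copy is zeroed) so that recovery cannot
 * mistake user data for metadata; see databuf_lo_before_commit().
 */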
static int gfs2_check_magic(struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        void *kaddr;
        __be32 *ptr;
        int rv = 0;

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                rv = 1;
        kunmap_atomic(kaddr, KM_USER0);

        return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        LIST_HEAD(started);
        struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
        struct buffer_head *bh = NULL, *bh1 = NULL;
        struct gfs2_log_descriptor *ld;
        unsigned int limit;
        unsigned int total_dbuf;
        unsigned int total_jdata;
        unsigned int num, n;
        __be64 *ptr = NULL;

        limit = databuf_limit(sdp);

        /*
         * Start writing ordered buffers, write journaled buffers
         * into the log along with a header
         */
        gfs2_log_lock(sdp);
        total_dbuf = sdp->sd_log_num_databuf;
        total_jdata = sdp->sd_log_num_jdata;
        bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
                                       bd_le.le_list);
        while (total_dbuf) {
                num = total_jdata;
                if (num > limit)
                        num = limit;
                n = 0;
                list_for_each_entry_safe_continue(bd1, bdt,
                                                  &sdp->sd_log_le_databuf,
                                                  bd_le.le_list) {
                        /* store off the buffer head in a local ptr since
                         * gfs2_bufdata might change when we drop the log lock
                         */
                        bh1 = bd1->bd_bh;

                        /* An ordered write buffer */
                        if (bh1 && !buffer_pinned(bh1)) {
                                list_move(&bd1->bd_le.le_list, &started);
                                if (bd1 == bd2) {
                                        bd2 = NULL;
                                        bd2 = list_prepare_entry(bd2,
                                                        &sdp->sd_log_le_databuf,
                                                        bd_le.le_list);
                                }
                                total_dbuf--;
                                if (bh1) {
                                        if (buffer_dirty(bh1)) {
                                                get_bh(bh1);

                                                gfs2_log_unlock(sdp);

                                                ll_rw_block(SWRITE, 1, &bh1);
                                                brelse(bh1);

                                                gfs2_log_lock(sdp);
                                        }
                                        continue;
                                }
                                continue;
                        } else if (bh1) { /* A journaled buffer */
                                int magic;
                                gfs2_log_unlock(sdp);
                                if (!bh) {
                                        bh = gfs2_log_get_buf(sdp);
                                        ld = (struct gfs2_log_descriptor *)
                                             bh->b_data;
                                        ptr = (__be64 *)(bh->b_data +
                                                         DATABUF_OFFSET);
                                        ld->ld_header.mh_magic =
                                                cpu_to_be32(GFS2_MAGIC);
                                        ld->ld_header.mh_type =
                                                cpu_to_be32(GFS2_METATYPE_LD);
                                        ld->ld_header.mh_format =
                                                cpu_to_be32(GFS2_FORMAT_LD);
                                        ld->ld_type =
                                                cpu_to_be32(GFS2_LOG_DESC_JDATA);
                                        ld->ld_length = cpu_to_be32(num + 1);
                                        ld->ld_data1 = cpu_to_be32(num);
                                        ld->ld_data2 = cpu_to_be32(0);
                                        memset(ld->ld_reserved, 0,
                                               sizeof(ld->ld_reserved));
                                }
                                magic = gfs2_check_magic(bh1);
                                *ptr++ = cpu_to_be64(bh1->b_blocknr);
                                *ptr++ = cpu_to_be64((__u64)magic);
                                clear_buffer_escaped(bh1);
                                if (unlikely(magic != 0))
                                        set_buffer_escaped(bh1);
                                gfs2_log_lock(sdp);
                                if (++n >= num)
                                        break;
                        } else if (!bh1) {
                                total_dbuf--;
                                sdp->sd_log_num_databuf--;
                                list_del_init(&bd1->bd_le.le_list);
                                if (bd1 == bd2) {
                                        bd2 = NULL;
                                        bd2 = list_prepare_entry(bd2,
                                                        &sdp->sd_log_le_databuf,
                                                        bd_le.le_list);
                                }
                                kmem_cache_free(gfs2_bufdata_cachep, bd1);
                        }
                }
                gfs2_log_unlock(sdp);
                if (bh) {
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        bh = NULL;
                        ptr = NULL;
                }
                n = 0;
                gfs2_log_lock(sdp);
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
                                             bd_le.le_list) {
                        if (!bd2->bd_bh)
                                continue;
                        /* copy buffer if it needs escaping */
                        gfs2_log_unlock(sdp);
                        if (unlikely(buffer_escaped(bd2->bd_bh))) {
                                void *kaddr;
                                struct page *page = bd2->bd_bh->b_page;
                                bh = gfs2_log_get_buf(sdp);
                                kaddr = kmap_atomic(page, KM_USER0);
                                memcpy(bh->b_data,
                                       kaddr + bh_offset(bd2->bd_bh),
                                       sdp->sd_sb.sb_bsize);
                                kunmap_atomic(kaddr, KM_USER0);
                                *(__be32 *)bh->b_data = 0;
                        } else {
                                bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        }
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }
                bh = NULL;
                BUG_ON(total_dbuf < num);
                total_dbuf -= num;
                total_jdata -= num;
        }
        gfs2_log_unlock(sdp);

        /* Wait on all ordered buffers */
        while (!list_empty(&started)) {
                gfs2_log_lock(sdp);
                bd1 = list_entry(started.next, struct gfs2_bufdata,
                                 bd_le.le_list);
                list_del_init(&bd1->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                bh = bd1->bd_bh;
                if (bh) {
                        bh->b_private = NULL;
                        get_bh(bh);
                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        brelse(bh);
                } else
                        gfs2_log_unlock(sdp);

                kmem_cache_free(gfs2_bufdata_cachep, bd1);
        }

        /* We've removed all the ordered write bufs here, so only jdata left */
        gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

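/**
 * databuf_lo_scan_elements - Replay journaled data blocks during recovery
 *
 * Like buf_lo_scan_elements(), except each tag is a pair: the block
 * number and an escape flag. If the flag is set, the original magic
 * number is written back over the first word of the block.
 */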
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);
                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                sdp->sd_log_num_jdata--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

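/*
 * The log operation vectors for each type of log element. gfs2_log_ops
 * is the NULL-terminated list used to iterate over all element types.
 */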
const struct gfs2_log_operations gfs2_glock_lops = {
        .lo_add = glock_lo_add,
        .lo_after_commit = glock_lo_after_commit,
        .lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_incore_commit = buf_lo_incore_commit,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_add = rg_lo_add,
        .lo_after_commit = rg_lo_after_commit,
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_incore_commit = buf_lo_incore_commit,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_glock_lops,
        &gfs2_buf_lops,
        &gfs2_revoke_lops,
        &gfs2_rg_lops,
        &gfs2_databuf_lops,
        NULL,
};