log.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log blocks needed for some structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_ail = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @ai: The ail structure
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                                struct writeback_control *wbc,
                                struct gfs2_ail *ai)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

restart:
        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_ail == ai);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
                mapping = bh->b_page->mapping;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                goto restart;
        }
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_ail *ai;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(ai, head, ai_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                gfs2_ail1_start_one(sdp, wbc, ai); /* This may drop ail lock */
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Move a transaction's synced buffers from ail1 to ail2
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_ail == ai);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai, *s;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                gfs2_ail1_empty_one(sdp, ai);
                if (list_empty(&ai->ai_ail1_list))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else
                        break;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}

/**
 * gfs2_ail2_empty_one - Remove all remaining entries from an AIL2 transaction
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &ai->ai_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_ail == ai);
                gfs2_remove_from_ail(bd);
        }
}

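/*
 * ail2_empty - free the fully-synced AIL2 transactions behind the new tail
 *
 * Every transaction whose first block lies between the old tail and
 * @new_tail is emptied and freed, taking wrap-around of the journal
 * into account. (Descriptive comment added for clarity.)
 */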
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                                  TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while(free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                           free_blocks - blks) != free_blocks)
                goto retry;
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);

        return 0;
}

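/*
 * log_bmap - map a journal-relative block number onto its on-disk block
 *
 * Walks the journal's extent list to translate logical block @lbn into a
 * disk address; returns -1 if @lbn is not covered by any extent.
 * (Descriptive comment added for clarity.)
 */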
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
        struct gfs2_journal_extent *je;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
                if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
                        return je->dblock + lbn - je->lblock;
        }

        return -1;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf_limit, metabufhdrs_needed;
        unsigned int dbuf_limit, databufhdrs_needed;
        unsigned int revokes = 0;

        mbuf_limit = buf_limit(sdp);
        metabufhdrs_needed = (sdp->sd_log_commited_buf +
                              (mbuf_limit - 1)) / mbuf_limit;
        dbuf_limit = databuf_limit(sdp);
        databufhdrs_needed = (sdp->sd_log_commited_databuf +
                              (dbuf_limit - 1)) / dbuf_limit;

        if (sdp->sd_log_commited_revoke > 0)
                revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));

        reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
                   sdp->sd_log_commited_databuf + databufhdrs_needed +
                   revokes;

        /* One for the overall header */
        if (reserved)
                reserved++;

        return reserved;
}

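/*
 * current_tail - find the current tail of the log
 *
 * The tail is the first block of the oldest transaction still on the
 * AIL1 list, or the log head if there are no incomplete transactions.
 * (Descriptive comment added for clarity.)
 */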
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

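/*
 * gfs2_log_incr_head - advance the log flush head by one block, wrapping
 * back to block zero at the end of the journal and noting the wrap in
 * sd_log_flush_wrapped. (Descriptive comment added for clarity.)
 */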
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        if (sdp->sd_log_flush_head == sdp->sd_log_tail)
                BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */

static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
        struct gfs2_sbd *sdp = bh->b_private;

        bh->b_private = NULL;

        end_buffer_write_sync(bh, uptodate);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        gfs2_log_incr_head(sdp);
        atomic_inc(&sdp->sd_log_in_flight);
        bh->b_private = sdp;
        bh->b_end_io = gfs2_log_write_endio;

        return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake log buffer
 * @bh: The buffer head
 * @uptodate: The I/O Status
 *
 */

static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
        struct buffer_head *real_bh = bh->b_private;
        struct gfs2_bufdata *bd = real_bh->b_private;
        struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

        end_buffer_write_sync(bh, uptodate);
        free_buffer_head(bh);
        unlock_buffer(real_bh);
        brelse(real_bh);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer head whose data should be written to the log
 *
 * Returns: the log buffer descriptor
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                      struct buffer_head *real)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;

        bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
        set_bh_page(bh, real->b_page, bh_offset(real));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;
        bh->b_private = real;
        bh->b_end_io = gfs2_fake_write_endio;

        gfs2_log_incr_head(sdp);
        atomic_inc(&sdp->sd_log_in_flight);

        return bh;
}

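/*
 * log_pull_tail - move the log tail forward to @new_tail, emptying the
 * relevant AIL2 transactions and returning the freed distance (in journal
 * blocks) to sd_log_blks_free. (Descriptive comment added for clarity.)
 */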
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal log header
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, e.g. GFS2_LOG_HEAD_UNMOUNT
 * @pull: Non-zero if the log tail is expected to have moved
 *
 * Builds a log header for the current flush head, writes it out
 * synchronously and then pulls the log tail forward if it has moved.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);

        gfs2_ail1_empty(sdp);
        tail = current_tail(sdp);

        lh = (struct gfs2_log_header *)bh->b_data;
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
                submit_bh(WRITE_SYNC | REQ_META, bh);
        else
                submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
        wait_on_buffer(bh);

        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
        else
                gfs2_assert_withdraw(sdp, !pull);

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_incr_head(sdp);
}

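/*
 * log_flush_commit - wait for any in-flight log I/O to complete, then
 * write the commit header for the flush. (Descriptive comment added for
 * clarity.)
 */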
static void log_flush_commit(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while(atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }

        log_write_header(sdp, 0, 0);
}

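/*
 * gfs2_ordered_write - submit write-back for every dirty buffer on the
 * ordered-data list so the data is on its way to disk before the log
 * itself is committed. (Descriptive comment added for clarity.)
 */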
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        LIST_HEAD(written);

        gfs2_log_lock(sdp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
                list_move(&bd->bd_le.le_list, &written);
                bh = bd->bd_bh;
                if (!buffer_dirty(bh))
                        continue;
                get_bh(bh);
                gfs2_log_unlock(sdp);
                lock_buffer(bh);
                if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
                        bh->b_end_io = end_buffer_write_sync;
                        submit_bh(WRITE_SYNC, bh);
                } else {
                        unlock_buffer(bh);
                        brelse(bh);
                }
                gfs2_log_lock(sdp);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        gfs2_log_unlock(sdp);
}

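/*
 * gfs2_ordered_wait - wait for the ordered-data buffers submitted by
 * gfs2_ordered_write() and drop them from the list once their I/O has
 * completed. (Descriptive comment added for clarity.)
 */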
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        gfs2_log_lock(sdp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
                bh = bd->bd_bh;
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        brelse(bh);
                        gfs2_log_lock(sdp);
                        continue;
                }
                list_del_init(&bd->bd_le.le_list);
        }
        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1);

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);

        if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
                printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
                       sdp->sd_log_commited_buf);
                gfs2_assert_withdraw(sdp, 0);
        }
        if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
                printk(KERN_INFO "GFS2: log databuf %u %u\n",
                       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
                gfs2_assert_withdraw(sdp, 0);
        }
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp);
        gfs2_ordered_wait(sdp);

        if (sdp->sd_log_head != sdp->sd_log_flush_head)
                log_flush_commit(sdp);
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                gfs2_log_lock(sdp);
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                gfs2_log_unlock(sdp);
                log_write_header(sdp, 0, PULL);
        }
        lops_after_commit(sdp, ai);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_buf = 0;
        sdp->sd_log_commited_databuf = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);

        kfree(ai);
}

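/*
 * log_refund - fold a transaction's buffer and revoke counts into the
 * running commit totals and return any unused part of its reservation to
 * sd_log_blks_free. (Descriptive comment added for clarity.)
 */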
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
                tr->tr_num_databuf_rm;
        gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
                             (((int)sdp->sd_log_commited_databuf) >= 0));
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
        unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

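/*
 * buf_lo_incore_commit - detach the transaction's buffers from its
 * per-transaction list, leaving tr_num_buf at zero. (Descriptive comment
 * added for clarity.)
 */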
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_list_buf;
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
                list_del_init(&bd->bd_list_tr);
                tr->tr_num_buf--;
        }
        gfs2_log_unlock(sdp);
        gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        buf_lo_incore_commit(sdp, tr);

        up_read(&sdp->sd_log_flush_lock);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        down_write(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
                         (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;

        up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
        for (;;) {
                gfs2_ail1_start(sdp);
                if (gfs2_ail1_empty(sdp))
                        break;
                msleep(10);
        }
}

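/*
 * The two helpers below implement the thresholds described above
 * gfs2_log_commit(): a journal flush is required once the number of
 * pinned blocks reaches thresh1, and an AIL flush once the number of
 * used journal blocks reaches thresh2. (Descriptive comment added for
 * clarity.)
 */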
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

        return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);
        unsigned preflush;

        while (!kthread_should_stop()) {

                preflush = atomic_read(&sdp->sd_log_pinned);
                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        io_schedule();
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }
                wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
                if (freezing(current))
                        refrigerator();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while(t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}