log.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
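 * For example, with 8-byte revoke entries and a 4KB block size, the first
 * block holds just under 512 entries and each continuation block slightly
 * more, since a continuation block carries only a meta header rather than a
 * full log descriptor.
 *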
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_ail = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}
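
/*
 * Note on the AIL (active items list), in outline: each gfs2_ail entry
 * tracks the buffers pinned by one log flush.  Buffers start on the
 * ai_ail1_list, meaning they still need writing back to their in-place
 * location; once that write completes they move to the ai_ail2_list, and
 * the whole entry can be freed when the log tail passes ai_first.
 */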

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the part of the AIL
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh))
					gfs2_io_error_bh(sdp, bh);
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;
			if (gl == bd->bd_gl)
				continue;
			gl = bd->bd_gl;
			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
			spin_unlock(&sdp->sd_ail_lock);
			filemap_fdatawrite(gfs2_glock2aspace(gl));
			spin_lock(&sdp->sd_ail_lock);

			retry = 1;
			break;
		}
	} while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past busy buffers
 *
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	u64 sync_gen;
	struct gfs2_ail *ai;
	int done = 0;

	spin_lock(&sdp->sd_ail_lock);
	head = &sdp->sd_ail1_list;
	if (list_empty(head)) {
		spin_unlock(&sdp->sd_ail_lock);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	while(!done) {
		done = 1;

		list_for_each_entry_reverse(ai, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop ail lock */
			done = 0;
			break;
		}
	}

	spin_unlock(&sdp->sd_ail_lock);
}

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}

/**
 * gfs2_ail2_empty_one - Remove the buffers on an AIL entry's ail2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
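	/*
	 * Keep back roughly six 4KB pages' worth of journal blocks (scaled
	 * to the filesystem block size) as headroom for the flush header
	 * blocks described above.
	 */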
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
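	/*
	 * Claim our blocks atomically; if the free count changed under us
	 * (another reservation or a log flush), go back and check again.
	 */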
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
		goto retry;
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}

static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
			return je->dblock + lbn - je->lblock;
	}

	return -1;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
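 * For example, in a 1000-block journal, log_distance(sdp, 3, 998) is 5,
 * since the newer block has wrapped past the end of the journal.
 *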
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf_limit, metabufhdrs_needed;
	unsigned int dbuf_limit, databufhdrs_needed;
	unsigned int revokes = 0;

	mbuf_limit = buf_limit(sdp);
	metabufhdrs_needed = (sdp->sd_log_commited_buf +
			      (mbuf_limit - 1)) / mbuf_limit;
	dbuf_limit = databuf_limit(sdp);
	databufhdrs_needed = (sdp->sd_log_commited_databuf +
			      (dbuf_limit - 1)) / dbuf_limit;

	if (sdp->sd_log_commited_revoke > 0)
		revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					  sizeof(u64));

	reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
		sdp->sd_log_commited_databuf + databufhdrs_needed +
		revokes;
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
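
/**
 * gfs2_log_incr_head - Advance the log flush head by one block
 * @sdp: The GFS2 superblock
 *
 * The journal is circular: when the flush head reaches the end of the
 * journal it wraps back to block 0 and the wrap is recorded in
 * sd_log_flush_wrapped.
 */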
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		BUG_ON(sdp->sd_log_flush_head != sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */

static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
	struct gfs2_sbd *sdp = bh->b_private;

	bh->b_private = NULL;

	end_buffer_write_sync(bh, uptodate);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);
	bh->b_private = sdp;
	bh->b_end_io = gfs2_log_write_endio;

	return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake buffer used to log metadata
 * @bh: The buffer head
 * @uptodate: The I/O Status
 *
 */

static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *real_bh = bh->b_private;
	struct gfs2_bufdata *bd = real_bh->b_private;
	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

	end_buffer_write_sync(bh, uptodate);
	free_buffer_head(bh);
	unlock_buffer(real_bh);
	brelse(real_bh);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer head whose data should be written to the log
 *
 * Returns: the log buffer descriptor
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;
	bh->b_private = real;
	bh->b_end_io = gfs2_fake_write_endio;

	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);

	return bh;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if the log tail is expected to move
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		submit_bh(WRITE_SYNC | REQ_META, bh);
	else
		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}

	log_write_header(sdp, 0, 0);
}
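
/*
 * Ordered-mode data: the two helpers below write out and then wait for the
 * data buffers on sd_log_le_ordered, so that (in the flush sequence in
 * gfs2_log_flush) file data reaches disk before the log header that commits
 * the metadata referring to it.
 */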

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	LIST_HEAD(written);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
		list_move(&bd->bd_le.le_list, &written);
		bh = bd->bd_bh;
		if (!buffer_dirty(bh))
			continue;
		get_bh(bh);
		gfs2_log_unlock(sdp);
		lock_buffer(bh);
		if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
			bh->b_end_io = end_buffer_write_sync;
			submit_bh(WRITE_SYNC, bh);
		} else {
			unlock_buffer(bh);
			brelse(bh);
		}
		gfs2_log_lock(sdp);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	gfs2_log_unlock(sdp);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
		bh = bd->bd_bh;
		if (buffer_locked(bh)) {
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
			gfs2_log_lock(sdp);
			continue;
		}
		list_del_init(&bd->bd_le.le_list);
	}
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1);

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
		       sdp->sd_log_commited_buf);
		gfs2_assert_withdraw(sdp, 0);
	}
	if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
		printk(KERN_INFO "GFS2: log databuf %u %u\n",
		       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
		gfs2_assert_withdraw(sdp, 0);
	}
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp);
	gfs2_ordered_wait(sdp);

	if (sdp->sd_log_head != sdp->sd_log_flush_head)
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		gfs2_log_lock(sdp);
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		gfs2_log_unlock(sdp);
		log_write_header(sdp, 0, PULL);
	}
	lops_after_commit(sdp, ai);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_commited_databuf = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

	kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	buf_lo_incore_commit(sdp, tr);

	up_read(&sdp->sd_log_flush_lock);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
	return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	unsigned preflush;

	while (!kthread_should_stop()) {

		preflush = atomic_read(&sdp->sd_log_pinned);
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, DIO_ALL);
			gfs2_log_flush(sdp, NULL);
			gfs2_ail1_empty(sdp, DIO_ALL);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			io_schedule();
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL);
			gfs2_ail1_empty(sdp, DIO_ALL);
		}

		wake_up(&sdp->sd_log_waitq);
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
		if (freezing(current))
			refrigerator();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}