super.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "dir.h"
#include "format.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "unlinked.h"

/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: tune
 *
 */

void gfs2_tune_init(struct gfs2_tune *gt)
{
	spin_lock_init(&gt->gt_spin);

	gt->gt_ilimit = 100;
	gt->gt_ilimit_tries = 3;
	gt->gt_ilimit_min = 1;
	gt->gt_demote_secs = 300;
	gt->gt_incore_log_blocks = 1024;
	gt->gt_log_flush_secs = 60;
	gt->gt_jindex_refresh_secs = 60;
	gt->gt_scand_secs = 15;
	gt->gt_recoverd_secs = 60;
	gt->gt_logd_secs = 1;
	gt->gt_quotad_secs = 5;
	gt->gt_inoded_secs = 15;
	gt->gt_quota_simul_sync = 64;
	gt->gt_quota_warn_period = 10;
	gt->gt_quota_scale_num = 1;
	gt->gt_quota_scale_den = 1;
	gt->gt_quota_cache_secs = 300;
	gt->gt_quota_quantum = 60;
	gt->gt_atime_quantum = 3600;
	gt->gt_new_files_jdata = 0;
	gt->gt_new_files_directio = 0;
	gt->gt_max_atomic_write = 4 << 20;
	gt->gt_max_readahead = 1 << 18;
	gt->gt_lockdump_size = 131072;
	gt->gt_stall_secs = 600;
	gt->gt_complain_secs = 10;
	gt->gt_reclaim_limit = 5000;
	gt->gt_entries_per_readdir = 32;
	gt->gt_prefetch_secs = 10;
	gt->gt_greedy_default = HZ / 10;
	gt->gt_greedy_quantum = HZ / 40;
	gt->gt_greedy_max = HZ / 4;
	gt->gt_statfs_quantum = 30;
	gt->gt_statfs_slow = 0;
}

/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @sb: The superblock
 * @silent: Don't print a message if the check fails
 *
 * Checks that the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent)
{
	unsigned int x;

	if (sb->sb_header.mh_magic != GFS2_MAGIC ||
	    sb->sb_header.mh_type != GFS2_METATYPE_SB) {
		if (!silent)
			printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
		return -EINVAL;
	}

	/* If format numbers match exactly, we're done. */
	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
		return 0;

	if (sb->sb_fs_format != GFS2_FORMAT_FS) {
		for (x = 0; gfs2_old_fs_formats[x]; x++)
			if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
				break;

		if (!gfs2_old_fs_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
		for (x = 0; gfs2_old_multihost_formats[x]; x++)
			if (gfs2_old_multihost_formats[x] ==
			    sb->sb_multihost_format)
				break;

		if (!gfs2_old_multihost_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	if (!sdp->sd_args.ar_upgrade) {
		printk(KERN_WARNING
		       "GFS2: code version (%u, %u) is incompatible "
		       "with ondisk format (%u, %u)\n",
		       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
		       sb->sb_fs_format, sb->sb_multihost_format);
		printk(KERN_INFO
		       "GFS2: Use the \"upgrade\" mount option to upgrade "
		       "the FS\n");
		printk(KERN_INFO "GFS2: See the manual for more details\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @gl: the glock for the superblock (assumed to be held)
 * @silent: Don't print a message if the mount fails
 *
 */

int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
{
	struct buffer_head *bh;
	uint32_t hash_blocks, ind_blocks, leaf_blocks;
	uint32_t tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_meta_read(gl, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift,
			       DIO_FORCE | DIO_START | DIO_WAIT, &bh);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	gfs2_assert(sdp, sizeof(struct gfs2_sb) <= bh->b_size);
	gfs2_sb_in(&sdp->sd_sb, bh->b_data);
	brelse(bh);

	error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
	if (error)
		return error;

	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(uint64_t);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(uint64_t);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(uint64_t);
	sdp->sd_ut_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			       sizeof(struct gfs2_unlinked_tag);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			       sizeof(struct gfs2_quota_change);

	/* Compute maximum reservation required to add an entry to a directory */

	hash_blocks = DIV_RU(sizeof(uint64_t) * (1 << GFS2_DIR_MAX_DEPTH),
			     sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_RU(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		uint64_t space, d;
		uint32_t m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
				 sizeof(struct gfs2_dinode);
	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		uint64_t space, d;
		uint32_t m;

		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_jheightsize[x - 1] || m)
			break;
		sdp->sd_jheightsize[x] = space;
	}
	sdp->sd_max_jheight = x;
	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

	return 0;
}

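/**
 * gfs2_do_upgrade - Upgrade the on-disk format of a filesystem
 * @sdp: the filesystem
 * @sb_gl: the glock for the superblock
 *
 * Currently a stub that always succeeds; no upgrade work is performed.
 *
 * Returns: errno
 */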
int gfs2_do_upgrade(struct gfs2_sbd *sdp, struct gfs2_glock *sb_gl)
{
	return 0;
}

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * This is very similar to the gfs2_rindex_hold() function, except that
 * in general we hold the jindex lock for longer periods of time and
 * grab it far less frequently than the rgrp lock.
 *
 * Returns: errno
 */

int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = get_v2ip(sdp->sd_jindex);
	struct qstr name;
	char buf[20];
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);

	for (;;) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
					   GL_LOCAL_EXCL, ji_gh);
		if (error)
			break;

		name.len = sprintf(buf, "journal%u", sdp->sd_journals);

		error = gfs2_dir_search(get_v2ip(sdp->sd_jindex),
					&name, NULL, NULL);
		if (error == -ENOENT) {
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		error = gfs2_lookupi(sdp->sd_jindex, &name, 1, &jd->jd_inode);
		if (error) {
			kfree(jd);
			break;
		}

		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

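/**
 * jdesc_find_i - Find a journal descriptor by journal ID
 * @head: the list of journal descriptors (sd_jindex_list)
 * @jid: the journal ID to look for
 *
 * The caller is expected to hold sd_jindex_spin.
 *
 * Returns: the matching gfs2_jdesc, or NULL if none is found
 */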
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

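/**
 * gfs2_jdesc_find - Look up a journal descriptor by journal ID
 * @sdp: the filesystem
 * @jid: the journal ID
 *
 * Returns: the matching gfs2_jdesc, or NULL if none is found
 */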
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

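/**
 * gfs2_jdesc_make_dirty - Flag a journal descriptor as dirty
 * @sdp: the filesystem
 * @jid: the ID of the journal to mark
 *
 * Marks the journal with the given ID so that it will be returned by a
 * later call to gfs2_jdesc_find_dirty().  If no journal with that ID
 * exists, nothing is done.
 */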
void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	if (jd)
		jd->jd_dirty = 1;
	spin_unlock(&sdp->sd_jindex_spin);
}

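/**
 * gfs2_jdesc_find_dirty - Find a journal descriptor marked dirty
 * @sdp: the filesystem
 *
 * The dirty flag of the returned descriptor is cleared before it is
 * handed back to the caller.
 *
 * Returns: a dirty gfs2_jdesc, or NULL if none is dirty
 */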
struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	spin_lock(&sdp->sd_jindex_spin);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_dirty) {
			jd->jd_dirty = 0;
			found = 1;
			break;
		}
	}

	spin_unlock(&sdp->sd_jindex_spin);

	if (!found)
		jd = NULL;

	return jd;
}

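/**
 * gfs2_jdesc_check - Sanity-check a journal inode and size its journal
 * @jd: the journal descriptor to check
 *
 * Verifies that the journal inode's size is within sane limits and is a
 * multiple of the block size, computes jd_blocks from it, and makes sure
 * the journal is fully allocated on disk.
 *
 * Returns: errno
 */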
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = get_v2ip(jd->jd_inode);
	struct gfs2_sbd *sdp = ip->i_sbd;
	int ar;
	int error;

	if (ip->i_di.di_size < (8 << 20) ||
	    ip->i_di.di_size > (1 << 30) ||
	    (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}

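/**
 * gfs2_lookup_master_dir - Look up the ".gfs2_admin" (master) directory
 * @sdp: the filesystem
 *
 * On success, sdp->sd_master_dir is set to the inode of the master
 * directory found under the root directory.
 *
 * Returns: errno
 */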
int gfs2_lookup_master_dir(struct gfs2_sbd *sdp)
{
	struct inode *inode = NULL;
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp,
			       sdp->sd_sb.sb_master_dir.no_addr,
			       &gfs2_inode_glops, CREATE, &gl);
	if (!error) {
		error = gfs2_lookup_simple(sdp->sd_root_dir, ".gfs2_admin",
					   &inode);
		sdp->sd_master_dir = inode;
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *j_gl = get_v2ip(sdp->sd_jdesc->jd_inode)->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
				   GL_LOCAL_EXCL | GL_NEVER_RECURSE, &t_gh);
	if (error)
		return error;

	gfs2_meta_cache_flush(get_v2ip(sdp->sd_jdesc->jd_inode));
	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize the in-core log state from the journal head */

	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_unlinked_init(sdp);
	if (error)
		goto fail;
	error = gfs2_quota_init(sdp);
	if (error)
		goto fail_unlinked;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail_unlinked:
	gfs2_unlinked_cleanup(sdp);

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_unlinked_dealloc(sdp);
	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
				   GL_LOCAL_EXCL | GL_NEVER_RECURSE | GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_unlinked_cleanup(sdp);
	gfs2_quota_cleanup(sdp);

	return error;
}

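/**
 * gfs2_statfs_init - Initialize the in-core statfs change data
 * @sdp: the filesystem
 *
 * Reads the master (and, unless mounted as a spectator, the local)
 * statfs_change data from disk into memory.
 *
 * Returns: errno
 */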
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = get_v2ip(sdp->sd_statfs_inode);
	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = get_v2ip(sdp->sd_sc_inode);
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);

out:
	gfs2_glock_dq_uninit(&gh);

	return 0;
}

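/**
 * gfs2_statfs_change - Record a change in the filesystem statistics
 * @sdp: the filesystem
 * @total: change in the number of total blocks
 * @free: change in the number of free blocks
 * @dinodes: change in the number of dinodes
 *
 * Applies the deltas to the in-core local statfs_change data and writes
 * them out to this node's statfs change inode.
 */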
void gfs2_statfs_change(struct gfs2_sbd *sdp, int64_t total, int64_t free,
			int64_t dinodes)
{
	struct gfs2_inode *l_ip = get_v2ip(sdp->sd_sc_inode);
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data +
			       sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}

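/**
 * gfs2_statfs_sync - Fold the local statfs changes into the master file
 * @sdp: the filesystem
 *
 * Adds this node's accumulated statfs changes to the master statfs file
 * and zeroes the local change file.
 *
 * Returns: errno
 */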
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = get_v2ip(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = get_v2ip(sdp->sd_sc_inode);
	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);

out_bh:
	brelse(m_bh);

out:
	gfs2_glock_dq_uninit(&gh);

	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the statfs_change structure to fill in
 *
 * Returns: errno
 */

int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
{
	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * statfs_slow_fill - Add one resource group's statistics to the sc
 * @rgd: the resource group
 * @sc: the sc structure to fill in
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_ri.ri_data;
	sc->sc_free += rgd->rd_rg.rg_free;
	sc->sc_dinodes += rgd->rd_rg.rg_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							get_gl2rgd(gh->gh_gl), sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);

	return error;
}

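/*
 * lfcc: per-journal bookkeeping used by gfs2_lock_fs_check_clean() to
 * remember the glock holder taken on each journal inode.
 */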
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */

int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_holder *t_gh)
{
	struct gfs2_holder ji_gh;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header lh;
	int error;

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error)
		return error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		error = gfs2_glock_nq_init(get_v2ip(jd->jd_inode)->i_gl,
					   LM_ST_SHARED, 0,
					   &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   LM_FLAG_PRIORITY | GL_NEVER_RECURSE |
				   GL_NOCACHE, t_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}

	gfs2_glock_dq_uninit(&ji_gh);

	return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and metadata for all machines by
 * acquiring the transaction lock exclusively.  All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);

	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}

	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock.  Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}