/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "eattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
	Opt_lockproto,
	Opt_locktable,
	Opt_hostdata,
	Opt_spectator,
	Opt_ignore_local_fs,
	Opt_localflocks,
	Opt_localcaching,
	Opt_debug,
	Opt_nodebug,
	Opt_upgrade,
	Opt_acl,
	Opt_noacl,
	Opt_quota_off,
	Opt_quota_account,
	Opt_quota_on,
	Opt_quota,
	Opt_noquota,
	Opt_suiddir,
	Opt_nosuiddir,
	Opt_data_writeback,
	Opt_data_ordered,
	Opt_meta,
	Opt_discard,
	Opt_nodiscard,
	Opt_commit,
	Opt_error,
};

static const match_table_t tokens = {
	{Opt_lockproto, "lockproto=%s"},
	{Opt_locktable, "locktable=%s"},
	{Opt_hostdata, "hostdata=%s"},
	{Opt_spectator, "spectator"},
	{Opt_ignore_local_fs, "ignore_local_fs"},
	{Opt_localflocks, "localflocks"},
	{Opt_localcaching, "localcaching"},
	{Opt_debug, "debug"},
	{Opt_nodebug, "nodebug"},
	{Opt_upgrade, "upgrade"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_quota_off, "quota=off"},
	{Opt_quota_account, "quota=account"},
	{Opt_quota_on, "quota=on"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_suiddir, "suiddir"},
	{Opt_nosuiddir, "nosuiddir"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_meta, "meta"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_commit, "commit=%d"},
	{Opt_error, NULL}
};
/**
 * gfs2_mount_args - Parse mount options
 * @sdp: the filesystem
 * @args: the mount argument structure to fill in
 * @options: the comma-separated option string passed in from mount
 *
 * Returns: errno
 */
int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
{
	char *o;
	int token;
	substring_t tmp[MAX_OPT_ARGS];
	int rv;

	/* Split the options into tokens with the "," character and
	   process them */

	while (1) {
		o = strsep(&options, ",");
		if (o == NULL)
			break;
		if (*o == '\0')
			continue;

		token = match_token(o, tokens, tmp);
		switch (token) {
		case Opt_lockproto:
			match_strlcpy(args->ar_lockproto, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_locktable:
			match_strlcpy(args->ar_locktable, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_hostdata:
			match_strlcpy(args->ar_hostdata, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_spectator:
			args->ar_spectator = 1;
			break;
		case Opt_ignore_local_fs:
			args->ar_ignore_local_fs = 1;
			break;
		case Opt_localflocks:
			args->ar_localflocks = 1;
			break;
		case Opt_localcaching:
			args->ar_localcaching = 1;
			break;
		case Opt_debug:
			args->ar_debug = 1;
			break;
		case Opt_nodebug:
			args->ar_debug = 0;
			break;
		case Opt_upgrade:
			args->ar_upgrade = 1;
			break;
		case Opt_acl:
			args->ar_posix_acl = 1;
			break;
		case Opt_noacl:
			args->ar_posix_acl = 0;
			break;
		case Opt_quota_off:
		case Opt_noquota:
			args->ar_quota = GFS2_QUOTA_OFF;
			break;
		case Opt_quota_account:
			args->ar_quota = GFS2_QUOTA_ACCOUNT;
			break;
		case Opt_quota_on:
		case Opt_quota:
			args->ar_quota = GFS2_QUOTA_ON;
			break;
		case Opt_suiddir:
			args->ar_suiddir = 1;
			break;
		case Opt_nosuiddir:
			args->ar_suiddir = 0;
			break;
		case Opt_data_writeback:
			args->ar_data = GFS2_DATA_WRITEBACK;
			break;
		case Opt_data_ordered:
			args->ar_data = GFS2_DATA_ORDERED;
			break;
		case Opt_meta:
			args->ar_meta = 1;
			break;
		case Opt_discard:
			args->ar_discard = 1;
			break;
		case Opt_nodiscard:
			args->ar_discard = 0;
			break;
		case Opt_commit:
			rv = match_int(&tmp[0], &args->ar_commit);
			if (rv || args->ar_commit <= 0) {
				fs_info(sdp, "commit mount option requires a positive numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_error:
		default:
			fs_info(sdp, "invalid mount option: %s\n", o);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list, *head;
	struct gfs2_jdesc *jd;
	struct gfs2_journal_extent *jext;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		head = &jd->extent_list;
		while (!list_empty(head)) {
			jext = list_entry(head->next,
					  struct gfs2_journal_extent,
					  extent_list);
			list_del(&jext->extent_list);
			kfree(jext);
		}
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}
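
/*
 * jdesc_find_i - look up a journal descriptor by journal id (jid) on a
 * jindex list. Returns the matching gfs2_jdesc, or NULL if no journal
 * with that jid is present. Callers serialise access to the list via
 * sd_jindex_spin (see gfs2_jdesc_find below).
 */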
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}
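
/*
 * gfs2_jdesc_check - sanity check a journal before use: the journal inode
 * must be between 8MB and 1GB, a multiple of the filesystem block size,
 * and fully allocated on disk. Also caches the journal size in blocks in
 * jd_blocks. Returns 0 if the journal looks usable, or -EIO after
 * flagging an inconsistency.
 */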
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}
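
/*
 * gfs2_statfs_change_in/_out convert a statfs change record between its
 * on-disk (big-endian struct gfs2_statfs_change) and in-core
 * (struct gfs2_statfs_change_host) representations.
 */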
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
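
/*
 * gfs2_statfs_init - read the master statfs file (and, on non-spectator
 * mounts, this node's local statfs change file) into the in-core copies
 * at mount time, under an exclusive glock on the master statfs inode.
 */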
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
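
/*
 * gfs2_statfs_change - apply a delta (total/free/dinode counts) to this
 * node's local statfs change file. The buffer is journalled via
 * gfs2_trans_add_bh() and the in-core local counters are updated under
 * sd_statfs_spin; the changes are folded into the master file later by
 * gfs2_statfs_sync().
 */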
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}
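
/*
 * update_statfs - fold the accumulated local statfs changes into the
 * master statfs file and zero the local record. The caller must already
 * hold buffers for both files and have an active transaction (see
 * gfs2_statfs_sync()).
 */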
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
}
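
/*
 * gfs2_statfs_sync - push this node's local statfs changes into the
 * master statfs file. Takes an exclusive glock on the master inode,
 * re-reads the master record, and, if there is anything to apply, runs
 * update_statfs() inside a two-dinode transaction.
 */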
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
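
/*
 * struct lfcc - one entry per journal on the temporary list built by
 * gfs2_lock_fs_check_clean(); it carries the shared glock holder taken
 * on that journal's inode while the filesystem is being frozen.
 */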
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   GL_NOCACHE, t_gh);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}

	return error;
}
/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and metadata for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */
int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);

	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}

	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, int sync)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct timespec atime;
	struct gfs2_dinode *di;
	int ret = 0;

	/* Check this is a "normal" inode, etc */
	if (!test_bit(GIF_USER, &ip->i_flags) ||
	    (current->flags & PF_MEMALLOC))
		return 0;
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (ret)
		goto do_flush;
	ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (ret)
		goto do_unlock;
	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		di = (struct gfs2_dinode *)bh->b_data;
		atime.tv_sec = be64_to_cpu(di->di_atime);
		atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
		if (timespec_compare(&inode->i_atime, &atime) > 0) {
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_dinode_out(ip, bh->b_data);
		}
		brelse(bh);
	}
	gfs2_trans_end(sdp);
do_unlock:
	gfs2_glock_dq_uninit(&gh);
do_flush:
	if (sync != 0)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	return ret;
}
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
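
/*
 * gfs2_umount_recovery_wait - wait_on_bit() callback used by
 * gfs2_put_super() to sleep until in-flight journal recovery
 * (JDF_RECOVERY) completes.
 */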
static int gfs2_umount_recovery_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* Unfreeze the filesystem, if we need to */

	mutex_lock(&sdp->sd_freeze_lock);
	if (sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_lock);

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	kthread_stop(sdp->sd_quotad_process);
	kthread_stop(sdp->sd_logd_process);

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/* At this point, we're through modifying the disk */

	/* Release stuff */

	iput(sdp->sd_jindex);
	iput(sdp->sd_inum_inode);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_trans_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_ir_inode);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
}
/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	if (wait && sb->s_fs_info)
		gfs2_log_flush(sb->s_fs_info, NULL);
	return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return -EINVAL;

	for (;;) {
		error = gfs2_freeze_fs(sdp);
		if (!error)
			break;

		switch (error) {
		case -EBUSY:
			fs_err(sdp, "waiting for recovery before freeze\n");
			break;

		default:
			fs_err(sdp, "error freezing FS: %d\n", error);
			break;
		}

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	gfs2_unfreeze_fs(sb->s_fs_info);
	return 0;
}
/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}
/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry of the path being statted
 * @buf: The kstatfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */

static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
	struct gfs2_tune *gt = &sdp->sd_tune;
	int error;

	spin_lock(&gt->gt_spin);
	args.ar_commit = gt->gt_log_flush_secs;
	spin_unlock(&gt->gt_spin);
	error = gfs2_mount_args(sdp, &args, data);
	if (error)
		return error;

	/* Not allowed to change locking details */
	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
		return -EINVAL;

	/* Some flags must not be changed */
	if (args_neq(&args, &sdp->sd_args, spectator) ||
	    args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
	    args_neq(&args, &sdp->sd_args, localflocks) ||
	    args_neq(&args, &sdp->sd_args, localcaching) ||
	    args_neq(&args, &sdp->sd_args, meta))
		return -EINVAL;

	if (sdp->sd_args.ar_spectator)
		*flags |= MS_RDONLY;

	if ((sb->s_flags ^ *flags) & MS_RDONLY) {
		if (*flags & MS_RDONLY)
			error = gfs2_make_fs_ro(sdp);
		else
			error = gfs2_make_fs_rw(sdp);
		if (error)
			return error;
	}

	sdp->sd_args = args;
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= MS_POSIXACL;
	else
		sb->s_flags &= ~MS_POSIXACL;
	spin_lock(&gt->gt_spin);
	gt->gt_log_flush_secs = args.ar_commit;
	spin_unlock(&gt->gt_spin);

	return 0;
}
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */
static void gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}
	generic_drop_inode(inode);
}
/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */

static void gfs2_clear_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	/* This tells us it's a "real" inode and not one which only
	 * serves to contain an address space (see rgrp.c, meta_io.c)
	 * which therefore doesn't have its own glocks.
	 */
	if (test_bit(GIF_USER, &ip->i_flags)) {
		ip->i_gl->gl_object = NULL;
		gfs2_glock_put(ip->i_gl);
		ip->i_gl = NULL;
		if (ip->i_iopen_gh.gh_gl) {
			ip->i_iopen_gh.gh_gl->gl_object = NULL;
			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		}
	}
}
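
/*
 * is_ancestor - return 1 if d2 is d1 itself or one of d1's ancestors.
 * Used by gfs2_show_options() to detect mounts of the GFS2 meta
 * filesystem root (sd_master_dir).
 */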
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int lfsecs;

	if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
		seq_printf(s, ",meta");
	if (args->ar_lockproto[0])
		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_printf(s, ",locktable=%s", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
	if (args->ar_spectator)
		seq_printf(s, ",spectator");
	if (args->ar_ignore_local_fs)
		seq_printf(s, ",ignore_local_fs");
	if (args->ar_localflocks)
		seq_printf(s, ",localflocks");
	if (args->ar_localcaching)
		seq_printf(s, ",localcaching");
	if (args->ar_debug)
		seq_printf(s, ",debug");
	if (args->ar_upgrade)
		seq_printf(s, ",upgrade");
	if (args->ar_posix_acl)
		seq_printf(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_printf(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_printf(s, ",discard");
	lfsecs = sdp->sd_tune.gt_log_flush_secs;
	if (lfsecs != 60)
		seq_printf(s, ",commit=%d", lfsecs);

	return 0;
}
/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
static void gfs2_delete_inode(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	if (!test_bit(GIF_USER, &ip->i_flags))
		goto out;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (unlikely(error)) {
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	gfs2_glock_dq_wait(&ip->i_iopen_gh);
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
	error = gfs2_glock_nq(&ip->i_iopen_gh);
	if (error)
		goto out_truncate;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	error = gfs2_dinode_dealloc(ip);
	if (error)
		goto out_unlock;

out_truncate:
	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
		gfs2_glock_dq(&ip->i_iopen_gh);
	gfs2_holder_uninit(&ip->i_iopen_gh);
	gfs2_glock_dq_uninit(&gh);
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
}
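
/*
 * gfs2_alloc_inode/gfs2_destroy_inode - allocate and free the combined
 * gfs2_inode + VFS inode from the gfs2_inode_cachep slab; the embedded
 * i_inode is what the VFS sees.
 */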
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (ip) {
		ip->i_flags = 0;
		ip->i_gl = NULL;
	}
	return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, inode);
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.destroy_inode		= gfs2_destroy_inode,
	.write_inode		= gfs2_write_inode,
	.delete_inode		= gfs2_delete_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_fs		= gfs2_freeze,
	.unfreeze_fs		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.remount_fs		= gfs2_remount_fs,
	.clear_inode		= gfs2_clear_inode,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};