super.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "eattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
	Opt_lockproto,
	Opt_locktable,
	Opt_hostdata,
	Opt_spectator,
	Opt_ignore_local_fs,
	Opt_localflocks,
	Opt_localcaching,
	Opt_debug,
	Opt_nodebug,
	Opt_upgrade,
	Opt_acl,
	Opt_noacl,
	Opt_quota_off,
	Opt_quota_account,
	Opt_quota_on,
	Opt_quota,
	Opt_noquota,
	Opt_suiddir,
	Opt_nosuiddir,
	Opt_data_writeback,
	Opt_data_ordered,
	Opt_meta,
	Opt_discard,
	Opt_nodiscard,
	Opt_commit,
	Opt_error,
};

static const match_table_t tokens = {
	{Opt_lockproto, "lockproto=%s"},
	{Opt_locktable, "locktable=%s"},
	{Opt_hostdata, "hostdata=%s"},
	{Opt_spectator, "spectator"},
	{Opt_ignore_local_fs, "ignore_local_fs"},
	{Opt_localflocks, "localflocks"},
	{Opt_localcaching, "localcaching"},
	{Opt_debug, "debug"},
	{Opt_nodebug, "nodebug"},
	{Opt_upgrade, "upgrade"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_quota_off, "quota=off"},
	{Opt_quota_account, "quota=account"},
	{Opt_quota_on, "quota=on"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_suiddir, "suiddir"},
	{Opt_nosuiddir, "nosuiddir"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_meta, "meta"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_commit, "commit=%d"},
	{Opt_error, NULL}
};
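
/*
 * Example (illustrative only; the cluster and filesystem names are made
 * up): a mount option string such as
 *
 *	lockproto=lock_dlm,locktable=mycluster:gfs01,quota=on,commit=30
 *
 * is split on ',' by gfs2_mount_args() below, each token is matched
 * against this table, and any "%s"/"%d" argument is captured into a
 * substring_t and read back with match_strlcpy()/match_int().
 */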

/**
 * gfs2_mount_args - Parse mount options
 * @sdp: the filesystem
 * @args: the args structure to fill in
 * @options: the raw mount option string
 *
 * Returns: errno
 */

int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
{
	char *o;
	int token;
	substring_t tmp[MAX_OPT_ARGS];
	int rv;

	/* Split the options into tokens with the "," character and
	   process them */

	while (1) {
		o = strsep(&options, ",");
		if (o == NULL)
			break;
		if (*o == '\0')
			continue;

		token = match_token(o, tokens, tmp);
		switch (token) {
		case Opt_lockproto:
			match_strlcpy(args->ar_lockproto, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_locktable:
			match_strlcpy(args->ar_locktable, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_hostdata:
			match_strlcpy(args->ar_hostdata, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_spectator:
			args->ar_spectator = 1;
			break;
		case Opt_ignore_local_fs:
			args->ar_ignore_local_fs = 1;
			break;
		case Opt_localflocks:
			args->ar_localflocks = 1;
			break;
		case Opt_localcaching:
			args->ar_localcaching = 1;
			break;
		case Opt_debug:
			args->ar_debug = 1;
			break;
		case Opt_nodebug:
			args->ar_debug = 0;
			break;
		case Opt_upgrade:
			args->ar_upgrade = 1;
			break;
		case Opt_acl:
			args->ar_posix_acl = 1;
			break;
		case Opt_noacl:
			args->ar_posix_acl = 0;
			break;
		case Opt_quota_off:
		case Opt_noquota:
			args->ar_quota = GFS2_QUOTA_OFF;
			break;
		case Opt_quota_account:
			args->ar_quota = GFS2_QUOTA_ACCOUNT;
			break;
		case Opt_quota_on:
		case Opt_quota:
			args->ar_quota = GFS2_QUOTA_ON;
			break;
		case Opt_suiddir:
			args->ar_suiddir = 1;
			break;
		case Opt_nosuiddir:
			args->ar_suiddir = 0;
			break;
		case Opt_data_writeback:
			args->ar_data = GFS2_DATA_WRITEBACK;
			break;
		case Opt_data_ordered:
			args->ar_data = GFS2_DATA_ORDERED;
			break;
		case Opt_meta:
			args->ar_meta = 1;
			break;
		case Opt_discard:
			args->ar_discard = 1;
			break;
		case Opt_nodiscard:
			args->ar_discard = 0;
			break;
		case Opt_commit:
			rv = match_int(&tmp[0], &args->ar_commit);
			if (rv || args->ar_commit <= 0) {
				fs_info(sdp, "commit mount option requires a positive numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_error:
		default:
			fs_info(sdp, "invalid mount option: %s\n", o);
			return -EINVAL;
		}
	}

	return 0;
}
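
/*
 * Note: unrecognised options fail the whole parse with -EINVAL rather
 * than being ignored.  Besides the remount path below, this parser is
 * also used for the initial mount (from the fill_super code in
 * ops_fstype.c in this tree).
 */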

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list, *head;
	struct gfs2_jdesc *jd;
	struct gfs2_journal_extent *jext;

	/* Splice the whole jindex list onto a private head so the
	   spinlock can be dropped while the entries are freed */
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		head = &jd->extent_list;
		while (!list_empty(head)) {
			jext = list_entry(head->next,
					  struct gfs2_journal_extent,
					  extent_list);
			list_del(&jext->extent_list);
			kfree(jext);
		}
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}
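
/*
 * Sanity limits enforced on a journal below: at least 8 MB (8 << 20),
 * at most 1 GB (1 << 30), a whole multiple of the filesystem block
 * size, and fully allocated on disk; anything else is treated as a
 * consistency error.
 */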
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize the log head pointers from the journal head */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}

static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
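
/*
 * statfs bookkeeping works on two levels: each node accumulates its own
 * allocation deltas in a per-node "local" statfs change file
 * (sd_statfs_local, backed by sd_sc_inode) and only occasionally folds
 * them into the shared "master" file (sd_statfs_master, backed by
 * sd_statfs_inode).  For example, freeing ten blocks adds 10 to
 * l_sc->sc_free via gfs2_statfs_change(); a later gfs2_statfs_sync()
 * adds that into m_sc->sc_free and zeroes the local copy, and
 * gfs2_statfs_i() reports master + local, so totals may briefly miss
 * other nodes' unsynced deltas.
 */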

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}

int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
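
/* One holder per journal, used only by gfs2_lock_fs_check_clean() below. */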
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   GL_NOCACHE, t_gh);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}

	return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);

	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}

	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}
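
/*
 * Most metadata reaches disk through the journal via transactions, so
 * the ->write_inode() below only has work to do when the in-core atime
 * is newer than the on-disk copy; in that case the dinode is written
 * out under a small RES_DINODE transaction.
 */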

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, int sync)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct timespec atime;
	struct gfs2_dinode *di;
	int ret = 0;

	/* Check this is a "normal" inode, etc */
	if (!test_bit(GIF_USER, &ip->i_flags) ||
	    (current->flags & PF_MEMALLOC))
		return 0;
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (ret)
		goto do_flush;
	ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (ret)
		goto do_unlock;
	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		di = (struct gfs2_dinode *)bh->b_data;
		atime.tv_sec = be64_to_cpu(di->di_atime);
		atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
		if (timespec_compare(&inode->i_atime, &atime) > 0) {
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_dinode_out(ip, bh->b_data);
		}
		brelse(bh);
	}
	gfs2_trans_end(sdp);
do_unlock:
	gfs2_glock_dq_uninit(&gh);
do_flush:
	if (sync != 0)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	return ret;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}

static int gfs2_umount_recovery_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* Unfreeze the filesystem, if we need to */

	mutex_lock(&sdp->sd_freeze_lock);
	if (sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_lock);

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	kthread_stop(sdp->sd_quotad_process);
	kthread_stop(sdp->sd_logd_process);

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/* At this point, we're through modifying the disk */

	/* Release stuff */

	iput(sdp->sd_jindex);
	iput(sdp->sd_inum_inode);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_trans_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_ir_inode);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: non-zero to wait for the flush to complete
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	if (wait && sb->s_fs_info)
		gfs2_log_flush(sb->s_fs_info, NULL);
	return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return -EINVAL;

	for (;;) {
		error = gfs2_freeze_fs(sdp);
		if (!error)
			break;

		switch (error) {
		case -EBUSY:
			fs_err(sdp, "waiting for recovery before freeze\n");
			break;

		default:
			fs_err(sdp, "error freezing FS: %d\n", error);
			break;
		}

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

	return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	gfs2_unfreeze_fs(sb->s_fs_info);
	return 0;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}
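
/*
 * gfs2_statfs_slow() below keeps a window of up to 64 asynchronous
 * glock requests (GL_ASYNC) in flight, one per resource group; it polls
 * them with gfs2_glock_poll()/gfs2_glock_wait() and feeds each granted
 * rgrp to statfs_slow_fill() until every rgrp has been counted.
 */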

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}
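
/*
 * Hypothetical usage: "mount -o remount,quota=on /mnt/gfs2" ends up in
 * gfs2_remount_fs() below, which re-parses the options with
 * gfs2_mount_args() and rejects any attempt to change the lock
 * protocol/table/hostdata or the spectator, ignore_local_fs,
 * localflocks, localcaching and meta flags.
 */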

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */

static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
	struct gfs2_tune *gt = &sdp->sd_tune;
	int error;

	spin_lock(&gt->gt_spin);
	args.ar_commit = gt->gt_log_flush_secs;
	spin_unlock(&gt->gt_spin);
	error = gfs2_mount_args(sdp, &args, data);
	if (error)
		return error;

	/* Not allowed to change locking details */
	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
		return -EINVAL;

	/* Some flags must not be changed */
	if (args_neq(&args, &sdp->sd_args, spectator) ||
	    args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
	    args_neq(&args, &sdp->sd_args, localflocks) ||
	    args_neq(&args, &sdp->sd_args, localcaching) ||
	    args_neq(&args, &sdp->sd_args, meta))
		return -EINVAL;

	if (sdp->sd_args.ar_spectator)
		*flags |= MS_RDONLY;

	if ((sb->s_flags ^ *flags) & MS_RDONLY) {
		if (*flags & MS_RDONLY)
			error = gfs2_make_fs_ro(sdp);
		else
			error = gfs2_make_fs_rw(sdp);
		if (error)
			return error;
	}

	sdp->sd_args = args;
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= MS_POSIXACL;
	else
		sb->s_flags &= ~MS_POSIXACL;
	spin_lock(&gt->gt_spin);
	gt->gt_log_flush_secs = args.ar_commit;
	spin_unlock(&gt->gt_spin);

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static void gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}
	generic_drop_inode(inode);
}

/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */

static void gfs2_clear_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	/* This tells us it's a "real" inode and not one which only
	 * serves to contain an address space (see rgrp.c, meta_io.c)
	 * which therefore doesn't have its own glocks.
	 */
	if (test_bit(GIF_USER, &ip->i_flags)) {
		ip->i_gl->gl_object = NULL;
		gfs2_glock_put(ip->i_gl);
		ip->i_gl = NULL;
		if (ip->i_iopen_gh.gh_gl) {
			ip->i_iopen_gh.gh_gl->gl_object = NULL;
			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		}
	}
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int lfsecs;

	if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
		seq_printf(s, ",meta");
	if (args->ar_lockproto[0])
		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_printf(s, ",locktable=%s", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
	if (args->ar_spectator)
		seq_printf(s, ",spectator");
	if (args->ar_ignore_local_fs)
		seq_printf(s, ",ignore_local_fs");
	if (args->ar_localflocks)
		seq_printf(s, ",localflocks");
	if (args->ar_localcaching)
		seq_printf(s, ",localcaching");
	if (args->ar_debug)
		seq_printf(s, ",debug");
	if (args->ar_upgrade)
		seq_printf(s, ",upgrade");
	if (args->ar_posix_acl)
		seq_printf(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_printf(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_printf(s, ",discard");
	lfsecs = sdp->sd_tune.gt_log_flush_secs;
	if (lfsecs != 60)
		seq_printf(s, ",commit=%d", lfsecs);
	return 0;
}

/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
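
/*
 * Deallocation sequence, as implemented below: take the inode glock
 * exclusively, promote the iopen glock from shared to exclusive
 * (LM_FLAG_TRY_1CB, so a node that already holds it wins and inherits
 * the work), then free directory hash tables, extended attributes and
 * data blocks before finally deallocating the dinode itself.
 */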

static void gfs2_delete_inode(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	if (!test_bit(GIF_USER, &ip->i_flags))
		goto out;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (unlikely(error)) {
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	gfs2_glock_dq_wait(&ip->i_iopen_gh);
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
	error = gfs2_glock_nq(&ip->i_iopen_gh);
	if (error)
		goto out_truncate;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	error = gfs2_dinode_dealloc(ip);
	if (error)
		goto out_unlock;

out_truncate:
	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
		gfs2_glock_dq(&ip->i_iopen_gh);
	gfs2_holder_uninit(&ip->i_iopen_gh);
	gfs2_glock_dq_uninit(&gh);
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (ip) {
		ip->i_flags = 0;
		ip->i_gl = NULL;
	}
	return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, inode);
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode = gfs2_alloc_inode,
	.destroy_inode = gfs2_destroy_inode,
	.write_inode = gfs2_write_inode,
	.delete_inode = gfs2_delete_inode,
	.put_super = gfs2_put_super,
	.sync_fs = gfs2_sync_fs,
	.freeze_fs = gfs2_freeze,
	.unfreeze_fs = gfs2_unfreeze,
	.statfs = gfs2_statfs,
	.remount_fs = gfs2_remount_fs,
	.clear_inode = gfs2_clear_inode,
	.drop_inode = gfs2_drop_inode,
	.show_options = gfs2_show_options,
};