super.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "eattr.h"
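
/* True if mount argument x differs between argument sets a1 and a2 */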
#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
	Opt_lockproto,
	Opt_locktable,
	Opt_hostdata,
	Opt_spectator,
	Opt_ignore_local_fs,
	Opt_localflocks,
	Opt_localcaching,
	Opt_debug,
	Opt_nodebug,
	Opt_upgrade,
	Opt_acl,
	Opt_noacl,
	Opt_quota_off,
	Opt_quota_account,
	Opt_quota_on,
	Opt_quota,
	Opt_noquota,
	Opt_suiddir,
	Opt_nosuiddir,
	Opt_data_writeback,
	Opt_data_ordered,
	Opt_meta,
	Opt_discard,
	Opt_nodiscard,
	Opt_commit,
	Opt_error,
};

static const match_table_t tokens = {
	{Opt_lockproto, "lockproto=%s"},
	{Opt_locktable, "locktable=%s"},
	{Opt_hostdata, "hostdata=%s"},
	{Opt_spectator, "spectator"},
	{Opt_ignore_local_fs, "ignore_local_fs"},
	{Opt_localflocks, "localflocks"},
	{Opt_localcaching, "localcaching"},
	{Opt_debug, "debug"},
	{Opt_nodebug, "nodebug"},
	{Opt_upgrade, "upgrade"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_quota_off, "quota=off"},
	{Opt_quota_account, "quota=account"},
	{Opt_quota_on, "quota=on"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_suiddir, "suiddir"},
	{Opt_nosuiddir, "nosuiddir"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_meta, "meta"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_commit, "commit=%d"},
	{Opt_error, NULL}
};

/**
 * gfs2_mount_args - Parse mount options
 * @sdp: the filesystem
 * @args: the mount argument structure to fill in
 * @options: the mount options string
 *
 * Returns: errno
 */
int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
{
	char *o;
	int token;
	substring_t tmp[MAX_OPT_ARGS];
	int rv;

	/* Split the options into tokens with the "," character and
	   process them */

	while (1) {
		o = strsep(&options, ",");
		if (o == NULL)
			break;
		if (*o == '\0')
			continue;

		token = match_token(o, tokens, tmp);
		switch (token) {
		case Opt_lockproto:
			match_strlcpy(args->ar_lockproto, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_locktable:
			match_strlcpy(args->ar_locktable, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_hostdata:
			match_strlcpy(args->ar_hostdata, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_spectator:
			args->ar_spectator = 1;
			break;
		case Opt_ignore_local_fs:
			args->ar_ignore_local_fs = 1;
			break;
		case Opt_localflocks:
			args->ar_localflocks = 1;
			break;
		case Opt_localcaching:
			args->ar_localcaching = 1;
			break;
		case Opt_debug:
			args->ar_debug = 1;
			break;
		case Opt_nodebug:
			args->ar_debug = 0;
			break;
		case Opt_upgrade:
			args->ar_upgrade = 1;
			break;
		case Opt_acl:
			args->ar_posix_acl = 1;
			break;
		case Opt_noacl:
			args->ar_posix_acl = 0;
			break;
		case Opt_quota_off:
		case Opt_noquota:
			args->ar_quota = GFS2_QUOTA_OFF;
			break;
		case Opt_quota_account:
			args->ar_quota = GFS2_QUOTA_ACCOUNT;
			break;
		case Opt_quota_on:
		case Opt_quota:
			args->ar_quota = GFS2_QUOTA_ON;
			break;
		case Opt_suiddir:
			args->ar_suiddir = 1;
			break;
		case Opt_nosuiddir:
			args->ar_suiddir = 0;
			break;
		case Opt_data_writeback:
			args->ar_data = GFS2_DATA_WRITEBACK;
			break;
		case Opt_data_ordered:
			args->ar_data = GFS2_DATA_ORDERED;
			break;
		case Opt_meta:
			args->ar_meta = 1;
			break;
		case Opt_discard:
			args->ar_discard = 1;
			break;
		case Opt_nodiscard:
			args->ar_discard = 0;
			break;
		case Opt_commit:
			rv = match_int(&tmp[0], &args->ar_commit);
			if (rv || args->ar_commit <= 0) {
				fs_info(sdp, "commit mount option requires a positive numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_error:
		default:
			fs_info(sdp, "invalid mount option: %s\n", o);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */
void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list, *head;
	struct gfs2_jdesc *jd;
	struct gfs2_journal_extent *jext;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		head = &jd->extent_list;
		while (!list_empty(head)) {
			jext = list_entry(head->next,
					  struct gfs2_journal_extent,
					  extent_list);
			list_del(&jext->extent_list);
			kfree(jext);
		}
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}
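
/* Find a journal descriptor with the given journal id in a jindex list;
   returns NULL if no match is found */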
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}
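
/* Look up the journal descriptor for journal id jid, taking sd_jindex_spin
   around the list walk */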
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}
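
/* Sanity-check a journal: its size must be between 8MB and 1GB, a multiple
   of the block size, and fully allocated on disk */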
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */
int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}
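
/* Endianness converters between the on-disk gfs2_statfs_change layout and
   the in-core gfs2_statfs_change_host representation */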
static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
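
/* Read the initial statfs values from the master (and, for non-spectator
   mounts, the local) statfs inode under an exclusive glock */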
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
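
/* Apply a delta to the in-core local statfs counters and write the result
   back into the local statfs inode buffer */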
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}
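
/* Fold the accumulated local statfs changes into the master statfs file
   and zero the local record, all within a single transaction */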
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
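
/* One entry per journal while checking that the filesystem is clean:
   holds the shared glock taken on that journal's inode */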
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   GL_NOCACHE, t_gh);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */
int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);

	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}

	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */
void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 *
 * Returns: errno
 */
static int gfs2_write_inode(struct inode *inode, int sync)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct timespec atime;
	struct gfs2_dinode *di;
	int ret = 0;

	/* Check this is a "normal" inode, etc */
	if (!test_bit(GIF_USER, &ip->i_flags) ||
	    (current->flags & PF_MEMALLOC))
		return 0;
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (ret)
		goto do_flush;
	ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (ret)
		goto do_unlock;
	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		di = (struct gfs2_dinode *)bh->b_data;
		atime.tv_sec = be64_to_cpu(di->di_atime);
		atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
		if (timespec_compare(&inode->i_atime, &atime) > 0) {
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_dinode_out(ip, bh->b_data);
		}
		brelse(bh);
	}
	gfs2_trans_end(sdp);
do_unlock:
	gfs2_glock_dq_uninit(&gh);
do_flush:
	if (sync != 0)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	return ret;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */
static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
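
/* wait_on_bit() callback used while waiting for journal recovery to
   finish during unmount */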
static int gfs2_umount_recovery_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */
static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	lock_kernel();

	/* Unfreeze the filesystem, if we need to */

	mutex_lock(&sdp->sd_freeze_lock);
	if (sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_lock);

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	kthread_stop(sdp->sd_quotad_process);
	kthread_stop(sdp->sd_logd_process);

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/* At this point, we're through modifying the disk */

	/* Release stuff */

	iput(sdp->sd_jindex);
	iput(sdp->sd_inum_inode);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_trans_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_ir_inode);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);

	unlock_kernel();
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: non-zero if the caller wants to wait for completion
 *
 * Flushes the log to disk.
 */
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	if (wait && sb->s_fs_info)
		gfs2_log_flush(sb->s_fs_info, NULL);
	return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */
static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return -EINVAL;

	for (;;) {
		error = gfs2_freeze_fs(sdp);
		if (!error)
			break;

		switch (error) {
		case -EBUSY:
			fs_err(sdp, "waiting for recovery before freeze\n");
			break;

		default:
			fs_err(sdp, "error freezing FS: %d\n", error);
			break;
		}

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

	return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */
static int gfs2_unfreeze(struct super_block *sb)
{
	gfs2_unfreeze_fs(sb->s_fs_info);
	return 0;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */
static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: a dentry on the filesystem
 * @buf: the statfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */
static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
	struct gfs2_tune *gt = &sdp->sd_tune;
	int error;

	spin_lock(&gt->gt_spin);
	args.ar_commit = gt->gt_log_flush_secs;
	spin_unlock(&gt->gt_spin);
	error = gfs2_mount_args(sdp, &args, data);
	if (error)
		return error;

	/* Not allowed to change locking details */
	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
		return -EINVAL;

	/* Some flags must not be changed */
	if (args_neq(&args, &sdp->sd_args, spectator) ||
	    args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
	    args_neq(&args, &sdp->sd_args, localflocks) ||
	    args_neq(&args, &sdp->sd_args, localcaching) ||
	    args_neq(&args, &sdp->sd_args, meta))
		return -EINVAL;

	if (sdp->sd_args.ar_spectator)
		*flags |= MS_RDONLY;

	if ((sb->s_flags ^ *flags) & MS_RDONLY) {
		if (*flags & MS_RDONLY)
			error = gfs2_make_fs_ro(sdp);
		else
			error = gfs2_make_fs_rw(sdp);
		if (error)
			return error;
	}

	sdp->sd_args = args;
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= MS_POSIXACL;
	else
		sb->s_flags &= ~MS_POSIXACL;
	spin_lock(&gt->gt_spin);
	gt->gt_log_flush_secs = args.ar_commit;
	spin_unlock(&gt->gt_spin);

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */
static void gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}
	generic_drop_inode(inode);
}

/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */
static void gfs2_clear_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	/* This tells us it's a "real" inode and not one which only
	 * serves to contain an address space (see rgrp.c, meta_io.c)
	 * which therefore doesn't have its own glocks.
	 */
	if (test_bit(GIF_USER, &ip->i_flags)) {
		ip->i_gl->gl_object = NULL;
		gfs2_glock_put(ip->i_gl);
		ip->i_gl = NULL;
		if (ip->i_iopen_gh.gh_gl) {
			ip->i_iopen_gh.gh_gl->gl_object = NULL;
			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		}
	}
}
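
/* Walk up the dentry tree from d1 and return 1 if d2 is encountered
   before reaching the root */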
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */
static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int lfsecs;

	if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
		seq_printf(s, ",meta");
	if (args->ar_lockproto[0])
		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_printf(s, ",locktable=%s", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
	if (args->ar_spectator)
		seq_printf(s, ",spectator");
	if (args->ar_ignore_local_fs)
		seq_printf(s, ",ignore_local_fs");
	if (args->ar_localflocks)
		seq_printf(s, ",localflocks");
	if (args->ar_localcaching)
		seq_printf(s, ",localcaching");
	if (args->ar_debug)
		seq_printf(s, ",debug");
	if (args->ar_upgrade)
		seq_printf(s, ",upgrade");
	if (args->ar_posix_acl)
		seq_printf(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_printf(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_printf(s, ",discard");
	lfsecs = sdp->sd_tune.gt_log_flush_secs;
	if (lfsecs != 60)
		seq_printf(s, ",commit=%d", lfsecs);
	return 0;
}

/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
static void gfs2_delete_inode(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	if (!test_bit(GIF_USER, &ip->i_flags))
		goto out;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (unlikely(error)) {
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	gfs2_glock_dq_wait(&ip->i_iopen_gh);
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
	error = gfs2_glock_nq(&ip->i_iopen_gh);
	if (error)
		goto out_truncate;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	error = gfs2_dinode_dealloc(ip);
	if (error)
		goto out_unlock;

out_truncate:
	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
		gfs2_glock_dq(&ip->i_iopen_gh);
	gfs2_holder_uninit(&ip->i_iopen_gh);
	gfs2_glock_dq_uninit(&gh);
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
}
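
/* Inode allocation and destruction callbacks: struct gfs2_inode comes from
   its own slab cache, with the VFS inode embedded inside it */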
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (ip) {
		ip->i_flags = 0;
		ip->i_gl = NULL;
	}
	return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, inode);
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode = gfs2_alloc_inode,
	.destroy_inode = gfs2_destroy_inode,
	.write_inode = gfs2_write_inode,
	.delete_inode = gfs2_delete_inode,
	.put_super = gfs2_put_super,
	.sync_fs = gfs2_sync_fs,
	.freeze_fs = gfs2_freeze,
	.unfreeze_fs = gfs2_unfreeze,
	.statfs = gfs2_statfs,
	.remount_fs = gfs2_remount_fs,
	.clear_inode = gfs2_clear_inode,
	.drop_inode = gfs2_drop_inode,
	.show_options = gfs2_show_options,
};