/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 *  GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"

LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
        struct super_block *sb;
        int fs_objects = 0;
        int total_objects;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance.  We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;

        if (!grab_super_passive(sb))
                return -1;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb);

        total_objects = sb->s_nr_dentry_unused +
                        sb->s_nr_inodes_unused + fs_objects + 1;

        if (sc->nr_to_scan) {
                int dentries;
                int inodes;

                /* proportion the scan between the caches */
                dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
                                                        total_objects;
                inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
                                                        total_objects;
                if (fs_objects)
                        fs_objects = (sc->nr_to_scan * fs_objects) /
                                                        total_objects;
                /*
                 * prune the dcache first as the icache is pinned by it, then
                 * prune the icache, followed by the filesystem specific caches
                 */
                prune_dcache_sb(sb, dentries);
                prune_icache_sb(sb, inodes);

                if (fs_objects && sb->s_op->free_cached_objects) {
                        sb->s_op->free_cached_objects(sb, fs_objects);
                        fs_objects = sb->s_op->nr_cached_objects(sb);
                }
                total_objects = sb->s_nr_dentry_unused +
                                sb->s_nr_inodes_unused + fs_objects;
        }

        total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
        drop_super(sb);
        return total_objects;
}
static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
        int err;
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                err = percpu_counter_init(&s->s_writers.counter[i], 0);
                if (err < 0)
                        goto err_out;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        return 0;
err_out:
        while (--i >= 0)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        return err;
}

static void destroy_sb_writers(struct super_block *s)
{
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
}
/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;

        if (s) {
                if (security_sb_alloc(s))
                        goto out_free_sb;

#ifdef CONFIG_SMP
                s->s_files = alloc_percpu(struct list_head);
                if (!s->s_files)
                        goto err_out;
                else {
                        int i;

                        for_each_possible_cpu(i)
                                INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
                }
#else
                INIT_LIST_HEAD(&s->s_files);
#endif
                if (init_sb_writers(s, type))
                        goto err_out;
                s->s_flags = flags;
                s->s_bdi = &default_backing_dev_info;
                INIT_HLIST_NODE(&s->s_instances);
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
                INIT_LIST_HEAD(&s->s_dentry_lru);
                INIT_LIST_HEAD(&s->s_inode_lru);
                spin_lock_init(&s->s_inode_lru_lock);
                INIT_LIST_HEAD(&s->s_mounts);
                init_rwsem(&s->s_umount);
                lockdep_set_class(&s->s_umount, &type->s_umount_key);
                /*
                 * sget() can have s_umount recursion.
                 *
                 * When it cannot find a suitable sb, it allocates a new
                 * one (this one), and tries again to find a suitable old
                 * one.
                 *
                 * In case that succeeds, it will acquire the s_umount
                 * lock of the old one. Since these are clearly distinct
                 * locks, and this object isn't exposed yet, there's no
                 * risk of deadlocks.
                 *
                 * Annotate this by putting this lock in a different
                 * subclass.
                 */
                down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
                s->s_count = 1;
                atomic_set(&s->s_active, 1);
                mutex_init(&s->s_vfs_rename_mutex);
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
                init_rwsem(&s->s_dquot.dqptr_sem);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
                s->s_time_gran = 1000000000;
                s->cleancache_poolid = -1;

                s->s_shrink.seeks = DEFAULT_SEEKS;
                s->s_shrink.shrink = prune_super;
                s->s_shrink.batch = 1024;
        }
out:
        return s;
err_out:
        security_sb_free(s);
#ifdef CONFIG_SMP
        if (s->s_files)
                free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
out_free_sb:
        kfree(s);
        s = NULL;
        goto out;
}
/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
        free_percpu(s->s_files);
#endif
        destroy_sb_writers(s);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree(s);
}
/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}
/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left.  In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                fs->kill_sb(s);

                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);
                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller.  If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}
EXPORT_SYMBOL(deactivate_super);
/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference.  grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference.  grab_super()
 * is called with sb_lock held and drops it.  Returns 1 in case of
 * success, 0 if we had failed (the superblock was already dead or
 * dying when grab_super() had been called).  Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}
/*
 * grab_super_passive - acquire a passive reference
 * @sb: reference we are trying to grab
 *
 * Tries to acquire a passive reference. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * superblock does not go away while we are working on it. It returns
 * false if a reference was not gained, and returns true with the s_umount
 * lock held in read mode if a reference is gained. On successful return,
 * the caller must drop the s_umount lock and the passive reference when
 * done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}
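
/*
 * Example: the grab_super_passive()/drop_super() pairing, as used by
 * prune_super() earlier in this file.  Illustrative sketch only, not
 * code from this file:
 *
 *      if (!grab_super_passive(sb))
 *              return -1;              // sb is dead or dying, don't touch it
 *      // ... inspect sb here; s_umount is held for read, so the
 *      // superblock cannot be torn down underneath us ...
 *      drop_super(sb);                 // releases s_umount and the reference
 */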
/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds. Have a nice day...\n",
                           sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}
EXPORT_SYMBOL(generic_shutdown_super);
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *, void *),
                        int (*set)(struct super_block *, void *),
                        int flags,
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}
EXPORT_SYMBOL(sget);
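
/*
 * Example: how a filesystem typically drives sget().  Illustrative
 * sketch only, not code from this file; the examplefs_* names and the
 * use of s_fs_info as the comparison key are assumptions.  The pattern
 * mirrors ns_test_super()/ns_set_super() below: @test decides whether
 * an existing superblock matches @data, @set initializes a freshly
 * allocated one, and a brand-new superblock comes back with
 * !sb->s_root and s_umount held for write.
 *
 *      static int examplefs_test_super(struct super_block *sb, void *data)
 *      {
 *              return sb->s_fs_info == data;   // match on private key
 *      }
 *
 *      static int examplefs_set_super(struct super_block *sb, void *data)
 *      {
 *              sb->s_fs_info = data;           // stash private key
 *              return set_anon_super(sb, NULL);
 *      }
 *
 *      struct super_block *sb = sget(fs_type, examplefs_test_super,
 *                                    examplefs_set_super, flags, key);
 *      if (IS_ERR(sb))
 *              return ERR_CAST(sb);
 *      if (!sb->s_root) {
 *              // brand new superblock: fill it in, then mark it live
 *              sb->s_flags |= MS_ACTIVE;
 *      }
 */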
void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}
EXPORT_SYMBOL(drop_super);

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
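
/*
 * Example: a caller of iterate_supers().  Illustrative sketch only; the
 * callback name and what it counts are assumptions.  The callback runs
 * with s_umount held for read, so it may inspect the superblock but
 * must not take s_umount itself or block indefinitely.
 *
 *      static void count_writable(struct super_block *sb, void *arg)
 *      {
 *              unsigned long *count = arg;
 *
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      (*count)++;     // count writable superblocks
 *      }
 *
 *      unsigned long writable = 0;
 *      iterate_supers(count_writable, &writable);
 */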
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);
/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
EXPORT_SYMBOL(get_super);
/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        if (flags & MS_RDONLY)
                acct_auto_close(sb);
        shrink_dcache_sb(sb);
        sync_filesystem(sb);

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        mark_files_ro(sb);
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}
static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}
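
/*
 * Note on usage: emergency_remount() is wired to emergency paths such as
 * the SysRq 'u' (read-only remount) handler.  It may be called from
 * atomic context, hence the GFP_ATOMIC allocation and the deferral of
 * the actual remount to a workqueue.  Sketch of a caller:
 *
 *      emergency_remount();    // returns immediately; the read-only
 *                              // remount happens later in process context
 */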
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}
struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}
EXPORT_SYMBOL(mount_ns);
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}
EXPORT_SYMBOL(kill_block_super);
#endif
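
/*
 * Example: a block-based filesystem wiring its file_system_type to
 * mount_bdev()/kill_block_super().  Illustrative sketch only; the
 * examplefs_* names and the skeletal fill_super body are assumptions.
 * fill_super runs with s_umount held on the superblock that sget()
 * produced; it must set sb->s_root on success.
 *
 *      static int examplefs_fill_super(struct super_block *sb, void *data,
 *                                      int silent)
 *      {
 *              // read the on-disk super, set sb->s_op, load the root
 *              // inode, then: sb->s_root = d_make_root(root_inode);
 *              return sb->s_root ? 0 : -ENOMEM;
 *      }
 *
 *      static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *                      int flags, const char *dev_name, void *data)
 *      {
 *              return mount_bdev(fs_type, flags, dev_name, data,
 *                                examplefs_fill_super);
 *      }
 *
 *      static struct file_system_type examplefs_fs_type = {
 *              .owner          = THIS_MODULE,
 *              .name           = "examplefs",
 *              .mount          = examplefs_mount,
 *              .kill_sb        = kill_block_super,
 *              .fs_flags       = FS_REQUIRES_DEV,
 *      };
 */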
struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
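
/*
 * Example: a virtual (no backing device) filesystem plugs the same kind
 * of fill_super callback into mount_nodev() instead, typically pairing
 * it with kill_anon_super() or kill_litter_super() in .kill_sb.  Sketch
 * with assumed examplefs_* names:
 *
 *      static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *                      int flags, const char *dev_name, void *data)
 *      {
 *              return mount_nodev(fs_type, flags, data, examplefs_fill_super);
 *      }
 *
 *      static struct file_system_type examplefs_fs_type = {
 *              .owner          = THIS_MODULE,
 *              .name           = "examplefs",
 *              .mount          = examplefs_mount,
 *              .kill_sb        = kill_litter_super,
 *      };
 */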
static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}
/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);
#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif
/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
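
/*
 * Example: how write paths use the freeze protection above.  Illustrative
 * sketch only; do_modification is an assumed helper.  Callers go through
 * the sb_start_write()/sb_end_write() wrappers (defined in
 * include/linux/fs.h), not __sb_start_write() directly; holding the
 * protection guarantees freeze_super() waits for the modification to
 * finish before declaring the filesystem frozen.
 *
 *      sb_start_write(inode->i_sb);    // may block if the fs is frozen
 *      ret = do_modification(inode);   // assumed fs-specific helper
 *      sb_end_write(inode->i_sb);
 *
 * The page fault and internal variants (sb_start_pagefault(),
 * sb_start_intwrite()) follow the same pairing at their own freeze
 * levels.
 */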
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle-through lockdep here so that it does not complain
         * about returning with lock to userspace
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will
 * return -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem, which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS: Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS: Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);
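
/*
 * Example: the freeze/thaw pairing as driven by a caller such as the
 * FIFREEZE/FITHAW ioctls or a block-level freeze.  Illustrative sketch
 * only; freeze_for_backup is an assumed helper.  freeze_super() takes
 * its own active reference and thaw_super() drops it, so the caller
 * only needs a reference that stays valid across the two calls.
 *
 *      static int freeze_for_backup(struct super_block *sb)
 *      {
 *              int err = freeze_super(sb);     // -EBUSY if already frozen
 *              if (err)
 *                      return err;
 *              // ... take the snapshot/backup here: all writes are
 *              // blocked and the on-disk state is consistent ...
 *              return thaw_super(sb);
 *      }
 */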