/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>	/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"

LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long fs_objects = 0;
        long total_objects;
        long freed = 0;
        long dentries;
        long inodes;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance. We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!grab_super_passive(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

        inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
        dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
        total_objects = dentries + inodes + fs_objects + 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         */
        freed = prune_dcache_sb(sb, dentries, sc->nid);
        freed += prune_icache_sb(sb, inodes, sc->nid);

        if (fs_objects) {
                fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
                                       total_objects);
                freed += sb->s_op->free_cached_objects(sb, fs_objects,
                                                       sc->nid);
        }

        drop_super(sb);
        return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long total_objects = 0;

        sb = container_of(shrink, struct super_block, s_shrink);

        if (!grab_super_passive(sb))
                return 0;

        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

        total_objects += list_lru_count_node(&sb->s_dentry_lru, sc->nid);
        total_objects += list_lru_count_node(&sb->s_inode_lru, sc->nid);

        total_objects = vfs_pressure_ratio(total_objects);
        drop_super(sb);
        return total_objects;
}

/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static void destroy_super(struct super_block *s)
{
        int i;

        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
#ifdef CONFIG_SMP
        free_percpu(s->s_files);
#endif
        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_counter_destroy(&s->s_writers.counter[i]);
        security_sb_free(s);
        WARN_ON(!list_empty(&s->s_mounts));
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree_rcu(s, rcu);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;
        int i;

        if (!s)
                return NULL;

        if (security_sb_alloc(s))
                goto fail;
#ifdef CONFIG_SMP
        s->s_files = alloc_percpu(struct list_head);
        if (!s->s_files)
                goto fail;
        for_each_possible_cpu(i)
                INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
#else
        INIT_LIST_HEAD(&s->s_files);
#endif
        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
                        goto fail;
                lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
                                 &type->s_writers_key[i], 0);
        }
        init_waitqueue_head(&s->s_writers.wait);
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        s->s_flags = flags;
        s->s_bdi = &default_backing_dev_info;
        INIT_HLIST_NODE(&s->s_instances);
        INIT_HLIST_BL_HEAD(&s->s_anon);
        INIT_LIST_HEAD(&s->s_inodes);
        if (list_lru_init(&s->s_dentry_lru))
                goto fail;
        if (list_lru_init(&s->s_inode_lru))
                goto fail;
        INIT_LIST_HEAD(&s->s_mounts);
        init_rwsem(&s->s_umount);
        lockdep_set_class(&s->s_umount, &type->s_umount_key);
        /*
         * sget() can have s_umount recursion.
         *
         * When it cannot find a suitable sb, it allocates a new
         * one (this one), and tries again to find a suitable old
         * one.
         *
         * In case that succeeds, it will acquire the s_umount
         * lock of the old one. Since these are clearly distinct
         * locks, and this object isn't exposed yet, there's no
         * risk of deadlocks.
         *
         * Annotate this by putting this lock in a different
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        mutex_init(&s->s_dquot.dqio_mutex);
        mutex_init(&s->s_dquot.dqonoff_mutex);
        init_rwsem(&s->s_dquot.dqptr_sem);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
        s->cleancache_poolid = -1;

        s->s_shrink.seeks = DEFAULT_SEEKS;
        s->s_shrink.scan_objects = super_cache_scan;
        s->s_shrink.count_objects = super_cache_count;
        s->s_shrink.batch = 1024;
        s->s_shrink.flags = SHRINKER_NUMA_AWARE;
        return s;

fail:
        destroy_super(s);
        return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees the superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;

        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                fs->kill_sb(s);

                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);

                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}
EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 if we failed (the superblock was already dead or
 * dying when grab_super() had been called). Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 * grab_super_passive - acquire a passive reference
 * @sb: reference we are trying to grab
 *
 * Tries to acquire a passive reference. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * superblock does not go away while we are working on it. It returns
 * false if a reference was not gained, and returns true with the s_umount
 * lock held in read mode if a reference is gained. On successful return,
 * the caller must drop the s_umount lock and the passive reference when
 * done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (hlist_unhashed(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                               "Self-destruct in 5 seconds. Have a nice day...\n",
                               sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}
EXPORT_SYMBOL(generic_shutdown_super);

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags,
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, flags);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        register_shrinker(&s->s_shrink);
        return s;
}
EXPORT_SYMBOL(sget);
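
/*
 * Illustrative usage sketch, not part of the original file: a filesystem's
 * ->mount() callback typically drives sget() as below; foo_mount(),
 * foo_fill_super(), foo_test() and foo_set() are hypothetical names.
 *
 *	static struct dentry *foo_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		struct super_block *s;
 *		int err;
 *
 *		s = sget(fs_type, foo_test, foo_set, flags, data);
 *		if (IS_ERR(s))
 *			return ERR_CAST(s);
 *		if (!s->s_root) {
 *			err = foo_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
 *			if (err) {
 *				deactivate_locked_super(s);
 *				return ERR_PTR(err);
 *			}
 *			s->s_flags |= MS_ACTIVE;
 *		}
 *		return dget(s->s_root);
 *	}
 *
 * mount_nodev(), mount_bdev() and mount_single() below implement exactly
 * this pattern for the common cases.
 */
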
void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}
EXPORT_SYMBOL(drop_super);

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
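
/*
 * Illustrative sketch, not part of the original file: callers pass a
 * callback that iterate_supers() runs with s_umount held for read. The
 * emergency-sync path in fs/sync.c, for example, does roughly this
 * (treat the exact shape as an approximation of that caller):
 *
 *	static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			sync_inodes_sb(sb);
 *	}
 *
 *	iterate_supers(sync_inodes_one_sb, NULL);
 */
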
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & MS_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
EXPORT_SYMBOL(get_super);

/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        while (1) {
                struct super_block *s = get_super(bdev);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                up_read(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        if (flags & MS_RDONLY)
                acct_auto_close(sb);
        shrink_dcache_sb(sb);
        sync_filesystem(sb);

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        mark_files_ro(sb);
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
                    !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);

        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;

        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}
EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
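
/*
 * Illustrative sketch, not part of the original file: a disk-based
 * filesystem usually just forwards its ->mount() to mount_bdev() with its
 * own fill_super routine and pairs it with kill_block_super(); the foo_*
 * names are hypothetical.
 *
 *	static struct dentry *foo_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foo_fill_super);
 *	}
 *
 *	static struct file_system_type foo_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "foo",
 *		.mount		= foo_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */
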
void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}
EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_counter_dec(&sb->s_writers.counter[level-1]);
        /*
         * Make sure s_writers are updated before we wake up waiters in
         * freeze_super().
         */
        smp_mb();
        if (waitqueue_active(&sb->s_writers.wait))
                wake_up(&sb->s_writers.wait);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
                                unsigned long ip)
{
        int i;

        if (!trylock) {
                for (i = 0; i < level - 1; i++)
                        if (lock_is_held(&sb->s_writers.lock_map[i])) {
                                trylock = true;
                                break;
                        }
        }
        rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
        if (unlikely(sb->s_writers.frozen >= level)) {
                if (!wait)
                        return 0;
                wait_event(sb->s_writers.wait_unfrozen,
                           sb->s_writers.frozen < level);
        }

#ifdef CONFIG_LOCKDEP
        acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
        percpu_counter_inc(&sb->s_writers.counter[level-1]);
        /*
         * Make sure counter is updated before we check for frozen.
         * freeze_super() first sets frozen and then checks the counter.
         */
        smp_mb();
        if (unlikely(sb->s_writers.frozen >= level)) {
                __sb_end_write(sb, level);
                goto retry;
        }
        return 1;
}
EXPORT_SYMBOL(__sb_start_write);
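
/*
 * Illustrative sketch, not part of the original file: write paths bracket
 * their work with the sb_start_write()/sb_end_write() wrappers (defined on
 * top of the two helpers above in include/linux/fs.h) so that
 * freeze_super() can wait them out:
 *
 *	sb_start_write(inode->i_sb);
 *	...modify the filesystem...
 *	sb_end_write(inode->i_sb);
 *
 * Page faults and fs-internal writers use the sb_start_pagefault() and
 * sb_start_intwrite() variants, which map to the higher freeze levels.
 */
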
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        s64 writers;

        /*
         * We just cycle-through lockdep here so that it does not complain
         * about returning with lock to userspace
         */
        rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
        rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

        do {
                DEFINE_WAIT(wait);

                /*
                 * We use a barrier in prepare_to_wait() to separate setting
                 * of frozen and checking of the counter
                 */
                prepare_to_wait(&sb->s_writers.wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
                if (writers)
                        schedule();

                finish_wait(&sb->s_writers.wait, &wait);
        } while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & MS_BORN)) {
                up_write(&sb->s_umount);
                return 0;	/* sic - it's "nothing to do" */
        }

        if (sb->s_flags & MS_RDONLY) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        /* From now on, no new normal writers can start */
        sb->s_writers.frozen = SB_FREEZE_WRITE;
        smp_wmb();

        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);

        sb_wait_write(sb, SB_FREEZE_WRITE);

        /* Now we go and block page faults... */
        down_write(&sb->s_umount);
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        smp_wmb();

        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        smp_wmb();
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        smp_wmb();
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * This is just for debugging purposes so that fs can warn if it
         * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
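
/*
 * Illustrative sketch, not part of the original file: freeze_super() and
 * thaw_super() are normally reached from userspace through the
 * FIFREEZE/FITHAW ioctls (see ioctl_fsfreeze() in fs/ioctl.c), e.g. so a
 * backup tool can snapshot the underlying device:
 *
 *	int fd = open("/mnt", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, FIFREEZE, 0) == 0) {
 *		...take the snapshot...
 *		ioctl(fd, FITHAW, 0);
 *	}
 */
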
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_writers.frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS:Filesystem thaw failed\n");
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_writers.frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);