/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vfs.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to the new superblock, or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
		INIT_LIST_HEAD(&s->s_files);
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = S_BIAS;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->dq_op = sb_dquot_ops;
		s->s_qcop = sb_quotactl_ops;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
	}
out:
	return s;
}
/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
	security_sb_free(s);
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree(s);
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. Returns non-zero if the superblock was
 * destroyed. The caller must hold sb_lock.
 */
static int __put_super(struct super_block *sb)
{
	int ret = 0;

	if (!--sb->s_count) {
		destroy_super(sb);
		ret = 1;
	}
	return ret;
}

/*
 * Drop a superblock's refcount.
 * Returns non-zero if the superblock is about to be destroyed and, at the
 * very least, has already been removed from the super_blocks list, so if
 * we are looping over the super blocks we need to restart.
 * The caller must hold sb_lock.
 */
int __put_super_and_need_restart(struct super_block *sb)
{
	/* check for race with generic_shutdown_super() */
	if (list_empty(&sb->s_list)) {
		/* super block is removed, need to restart... */
		__put_super(sb);
		return 1;
	}
	/* can't be the last, since s_list is still in use */
	sb->s_count--;
	BUG_ON(sb->s_count == 0);
	return 0;
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, acquiring a temporary one if
 * there are no active references left. In that case we lock superblock,
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 */
void deactivate_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;

	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
		s->s_count -= S_BIAS-1;
		spin_unlock(&sb_lock);
		vfs_dq_off(s, 0);
		down_write(&s->s_umount);
		fs->kill_sb(s);
		put_filesystem(fs);
		put_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Equivalent of up_write(&s->s_umount); deactivate_super(s);, except that
 * it does not unlock it until it's all over. As a result, it's safe to
 * use to dispose of new superblock on ->get_sb() failure exits - nobody
 * will see the sucker until it's all over. The equivalent using up_write +
 * deactivate_super is safe for that purpose only if the superblock is
 * either safe to use or has a NULL ->s_root when we unlock.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;

	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
		s->s_count -= S_BIAS-1;
		spin_unlock(&sb_lock);
		vfs_dq_off(s, 0);
		fs->kill_sb(s);
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
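
/*
 * Illustrative sketch, not part of the original file: a hand-rolled
 * ->get_sb() instance of the kind described above, showing where
 * deactivate_locked_super() disposes of a superblock that sget()
 * returned locked but that fill_super() then failed to set up.
 * All examplefs_* names are hypothetical; sget() and set_anon_super()
 * are defined further down in this file.
 */
static int examplefs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *s;
	int err;

	s = sget(fs_type, NULL, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);
	s->s_flags = flags;
	err = examplefs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (err) {
		/* superblock never became visible; kill it while still locked */
		deactivate_locked_super(s);
		return err;
	}
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;
}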
/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * have just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 if we failed (the superblock was already dead or dying
 * when grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if (s->s_root) {
		spin_lock(&sb_lock);
		if (s->s_count > S_BIAS) {
			atomic_inc(&s->s_active);
			s->s_count--;
			spin_unlock(&sb_lock);
			return 1;
		}
		spin_unlock(&sb_lock);
	}
	up_write(&s->s_umount);
	put_super(s);
	yield();
	return 0;
}
/*
 * Superblock locking. We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
	get_fs_excl();
	mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
	put_fs_excl();
	mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);
/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		get_fs_excl();
		sb->s_flags &= ~MS_ACTIVE;

		/* bad name - it should be evict_inodes() */
		invalidate_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		/* Forget any remaining inodes */
		if (invalidate_inodes(sb)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds. Have a nice day...\n",
			   sb->s_id);
		}

		put_fs_excl();
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_list);
	list_del(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
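
/*
 * Illustrative sketch, not part of the original file: a ->kill_sb()
 * following the pattern described above - detach the fs-private state,
 * let generic_shutdown_super() tear down dentries and inodes, then free
 * whatever is left. samplefs_sb_info is a hypothetical per-sb structure.
 */
static void samplefs_kill_sb(struct super_block *sb)
{
	struct samplefs_sb_info *sbi = sb->s_fs_info;

	generic_shutdown_super(sb);
	kfree(sbi);
}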
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 * sync_supers - helper for periodic superblock writeback
 *
 * Call the write_super method if present on all dirty superblocks in
 * the system. This is for the periodic writeback used by most older
 * filesystems. For data integrity superblock writeback use
 * sync_filesystems() instead.
 *
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_op->write_super && sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);

			down_read(&sb->s_umount);
			if (sb->s_root && sb->s_dirt)
				sb->s_op->write_super(sb);
			up_read(&sb->s_umount);

			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
}
/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);
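
/*
 * Illustrative sketch, not part of the original file: callers of
 * get_super() receive the superblock with s_umount held for reading and
 * must release it with drop_super() when they are done with it.
 * example_touch_super() is a hypothetical caller.
 */
static void example_touch_super(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);

	if (sb) {
		/* ... inspect or write back sb here, s_umount held shared ... */
		drop_super(sb);
	}
}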
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference and s_umount held exclusively or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev != bdev)
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root) {
			spin_lock(&sb_lock);
			if (sb->s_count > S_BIAS) {
				atomic_inc(&sb->s_active);
				sb->s_count--;
				spin_unlock(&sb_lock);
				return sb;
			}
			spin_unlock(&sb_lock);
		}
		up_write(&sb->s_umount);
		put_super(sb);
		yield();
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	return NULL;
}
struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
{
	struct super_block *s;
	struct ustat tmp;
	struct kstatfs sbuf;
	int err = -EINVAL;

	s = user_get_super(new_decode_dev(dev));
	if (s == NULL)
		goto out;
	err = vfs_statfs(s->s_root, &sbuf);
	drop_super(s);
	if (err)
		goto out;

	memset(&tmp, 0, sizeof(struct ustat));
	tmp.f_tfree = sbuf.f_bfree;
	tmp.f_tinode = sbuf.f_ffree;

	err = copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
out:
	return err;
}
/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_rw, remount_ro;

	if (sb->s_frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
	remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
		retval = vfs_dq_off(sb, 1);
		if (retval < 0 && retval != -ENOSYS)
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval)
			return retval;
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	if (remount_rw)
		vfs_dq_quota_on_remount(sb);
	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we remount read-only and then copy the filesystem
	 * from the bdev, we could get stale data, so invalidate it to give a
	 * best effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;
}
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * ->remount_fs needs lock_kernel().
			 *
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		put_super(sb);
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);	/* protects the above */
static int unnamed_dev_start = 0;	/* don't bother trying below it */

int set_anon_super(struct super_block *s, void *data)
{
	int dev;
	int error;

retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	s->s_dev = MKDEV(0, dev & MINORMASK);
	return 0;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	int slot = MINOR(sb->s_dev);

	generic_shutdown_super(sb);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);
static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, data);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		int err;
		sb->s_flags = flags;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= MS_ACTIVE;
	}

	simple_set_mnt(mnt, sb);
	return 0;
}

EXPORT_SYMBOL(get_sb_ns);
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

int get_sb_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		close_bdev_exclusive(bdev, mode);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	simple_set_mnt(mnt, s);
	return 0;

error_s:
	error = PTR_ERR(s);
error_bdev:
	close_bdev_exclusive(bdev, mode);
error:
	return error;
}

EXPORT_SYMBOL(get_sb_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	close_bdev_exclusive(bdev, mode);
}

EXPORT_SYMBOL(kill_block_super);
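
/*
 * Illustrative sketch, not part of the original file: a block-device
 * based filesystem normally forwards its ->get_sb() to get_sb_bdev()
 * and pairs it with kill_block_super() for teardown. "blkexamplefs"
 * and blkexamplefs_fill_super() are hypothetical names.
 */
static int blkexamplefs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data,
			   blkexamplefs_fill_super, mnt);
}

static struct file_system_type blkexamplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "blkexamplefs",
	.get_sb		= blkexamplefs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};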
#endif

int get_sb_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return error;
	}
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;
}

EXPORT_SYMBOL(get_sb_nodev);
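
/*
 * Illustrative sketch, not part of the original file: a simple in-memory
 * filesystem can build its entire mount/unmount glue out of
 * get_sb_nodev() and kill_litter_super(). The memexamplefs_* names are
 * hypothetical.
 */
static int memexamplefs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data,
			    memexamplefs_fill_super, mnt);
}

static struct file_system_type memexamplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "memexamplefs",
	.get_sb		= memexamplefs_get_sb,
	.kill_sb	= kill_litter_super,
};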
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

int get_sb_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	simple_set_mnt(mnt, s);
	return 0;
}

EXPORT_SYMBOL(get_sb_single);
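
/*
 * Illustrative sketch, not part of the original file: pseudo-filesystems
 * that want exactly one superblock system-wide (every mount sees the same
 * tree) forward to get_sb_single() instead. singlefs_* names are
 * hypothetical.
 */
static int singlefs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, singlefs_fill_super, mnt);
}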
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct vfsmount *mnt;
	char *secdata = NULL;
	int error;

	if (!type)
		return ERR_PTR(-ENODEV);

	error = -ENOMEM;
	mnt = alloc_vfsmnt(name);
	if (!mnt)
		goto out;

	if (flags & MS_KERNMOUNT)
		mnt->mnt_flags = MNT_INTERNAL;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out_mnt;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	error = type->get_sb(type, flags, name, data, mnt);
	if (error < 0)
		goto out_free_secdata;
	BUG_ON(!mnt->mnt_sb);

	error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule. This warning should be either removed or
	 * converted to a BUG() in 2.6.34.
	 */
	WARN((mnt->mnt_sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, mnt->mnt_sb->s_maxbytes);

	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;
	up_write(&mnt->mnt_sb->s_umount);
	free_secdata(secdata);
	return mnt;
out_sb:
	dput(mnt->mnt_root);
	deactivate_locked_super(mnt->mnt_sb);
out_free_secdata:
	free_secdata(secdata);
out_mnt:
	free_vfsmnt(mnt);
out:
	return ERR_PTR(error);
}

EXPORT_SYMBOL_GPL(vfs_kern_mount);
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');

	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;

	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}

EXPORT_SYMBOL_GPL(do_kern_mount);

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
}

EXPORT_SYMBOL_GPL(kern_mount_data);
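
/*
 * Illustrative sketch, not part of the original file: in-kernel users
 * that need a private instance of a filesystem mount it through
 * kern_mount() (a wrapper around kern_mount_data() with NULL data) and
 * later release the vfsmount with mntput(). examplefs_fs_type is a
 * hypothetical, already-registered filesystem type.
 */
static struct vfsmount *example_mnt;

static int __init example_pin_fs(void)
{
	example_mnt = kern_mount(&examplefs_fs_type);
	if (IS_ERR(example_mnt))
		return PTR_ERR(example_mnt);
	return 0;
}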