/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
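
/*
 * Hash a (mount, dentry) pair into the mount hash table.  Both pointers
 * are scaled down by L1_CACHE_BYTES (their low bits carry no information)
 * and folded together, then masked to the table size.
 */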
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&vfsmount_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&vfsmount_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
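
/*
 * Allocate and minimally initialize a new vfsmount: assign a mount ID,
 * duplicate the device name (if any) and set up the list heads and the
 * (per-CPU, on SMP) writer count.  Returns NULL on failure.
 */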
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_SMP
		mnt->mnt_writers = alloc_percpu(int);
		if (!mnt->mnt_writers)
			goto out_free_devname;
#else
		mnt->mnt_writers = 0;
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*.  This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void inc_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
#else
	mnt->mnt_writers++;
#endif
}

static inline void dec_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--;
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int count_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += *per_cpu_ptr(mnt->mnt_writers, cpu);
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;

	preempt_disable();
	inc_mnt_writers(mnt);
	/*
	 * The store to inc_mnt_writers must be visible before we pass
	 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements.  So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (__mnt_is_readonly(mnt)) {
		dec_mnt_writers(mnt);
		ret = -EROFS;
		goto out;
	}
out:
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
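
/*
 * Typical usage, sketched for illustration only (this caller is not part
 * of this file): bracket a VFS write with the want/drop pair and honour
 * -EROFS:
 *
 *	int err = mnt_want_write(path.mnt);
 *	if (err)
 *		return err;		// e.g. -EROFS on a read-only mount
 *	err = vfs_unlink(dir, dentry);	// any operation that writes
 *	mnt_drop_write(path.mnt);
 *	return err;
 */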
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it.  This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	inc_mnt_writers(mnt);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount will be written to
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	dec_mnt_writers(mnt);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	spin_lock(&vfsmount_lock);
	mnt->mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters.  This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly.  The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (count_mnt_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
	spin_unlock(&vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}

void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
}
EXPORT_SYMBOL(simple_set_mnt);

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_writers);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir.  If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct vfsmount *child_mnt;

	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
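
/*
 * Returns true if @mnt belongs to the caller's own mount namespace;
 * mounts from foreign namespaces must not be modified through paths
 * that merely happen to reference them.
 */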
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}
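
/*
 * Attach @mnt at @path: set its parent and mountpoint and link it into
 * the mount hash table and its parent's list of children.  Counterpart
 * of detach_mnt() above.
 */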
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
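
/*
 * Return the deepest, last-mounted descendant of @p; a subsequent
 * next_mnt() from that point steps over the whole subtree, which is how
 * copy_tree() skips unbindable subtrees in a single move.
 */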
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
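
/*
 * Create a copy of @old rooted at @root.  The CL_* flags decide how the
 * clone relates to the original: CL_SLAVE makes it a slave of @old,
 * CL_PRIVATE detaches it from all peer/slave relationships, and
 * CL_MAKE_SHARED marks the clone shared (allocating a peer group ID
 * if needed).
 */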
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}
static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * A nonzero writer count here probably indicates that somebody
	 * messed up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 */
	/*
	 * atomic_dec_and_lock() used to deal with ->mnt_count decrements
	 * provides barriers, so count_mnt_writers() below is safe.  AV
	 */
	WARN_ON(count_mnt_writers(mnt));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}
EXPORT_SYMBOL(mntput_no_expire);
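
/*
 * mnt_pin/mnt_unpin let a user such as the process accounting code keep
 * a mount busy without holding a regular reference: a pinned mount
 * defers its final __mntput() until the pin is dropped (see the pinned
 * path in mntput_no_expire() above).
 */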
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(mnt->mnt_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
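
/*
 * Illustrative pairing, sketched for a hypothetical filesystem "foo"
 * (not part of this file): fill_super saves the raw option string once,
 * and the super operations point .show_options at generic_show_options:
 *
 *	static int foo_fill_super(struct super_block *sb, void *data,
 *				  int silent)
 *	{
 *		save_mount_options(sb, data);	// data is the option string
 *		...
 *	}
 *
 *	static const struct super_operations foo_sops = {
 *		.show_options	= generic_show_options,
 *	};
 */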
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

struct proc_fs_info {
	int flag;
	const char *str;
};

static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}

static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ MNT_STRICTATIME, ",strictatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}
static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one.  Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts.  If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account.  IOW, in some cases it will
 * give false negatives.  The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}
EXPORT_SYMBOL(may_umount);
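
/*
 * Final teardown for a list of mounts collected by umount_tree(): drop
 * the mountpoint dentry and parent mount references (outside of
 * vfsmount_lock, since dput/mntput may sleep) and then the mounts
 * themselves.
 */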
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}
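
/*
 * Detach every mount in the tree rooted at @mnt from its namespace and
 * move it onto @kill; if @propagate is set, also unmount whatever was
 * propagated to other mount trees.  The caller holds vfsmount_lock and
 * namespace_sem, and must hand @kill to release_mounts() afterwards.
 */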
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally.  Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things.  In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace', the
	 * umount_begin might fail to complete on the first run through
	 * as other tasks must return, and the like.  That's for the
	 * mount program to worry about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * There's no sense in grabbing the lock for this test, but the test
	 * itself looks somewhat bogus.  Suggestions for a better replacement?
	 * Ho-hum...  In principle, we might treat that as umount + switch
	 * to rootfs.  GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2).  Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes.  Our API is identical to OSF/1 to avoid making a mess of AMD.
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	int retval;

	retval = user_path(name, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount.  No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
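
/*
 * Clone the tree of mounts rooted at @dentry on @mnt.  The CL_* flags
 * are passed through to clone_mnt(); unbindable submounts are skipped
 * unless CL_COPY_ALL is set.  Returns NULL on failure, with any
 * partially built copy torn down.
 */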
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct path *path)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}
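
/*
 * Peer group IDs are handed out lazily: invent_group_ids() assigns one
 * to every mount in the tree that lacks one (before the tree becomes
 * shared), and cleanup_group_ids() releases them again on the error
 * path.  Both run under namespace_sem.
 */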
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *                store the parent mount and mountpoint dentry.
 *                (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared.  The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount.  The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |     slave (*)  | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination.  And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(parent_path->mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}
static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (!d_unlinked(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out;

	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, path);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_path);
	return err;
}
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags.  dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(path->mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		path->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err) {
		security_sb_post_remount(path->mnt, flags, data);
		spin_lock(&vfsmount_lock);
		touch_mnt_namespace(path->mnt->mnt_ns);
		spin_unlock(&vfsmount_lock);
	}
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(path->dentry) &&
	       follow_down(path))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (old_path.mnt == old_path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(path->mnt) &&
	    tree_contains_unbindable(old_path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_path.mnt->mnt_expire);
out1:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
  1442. /*
  1443. * create a new mount for userspace and request it to be added into the
  1444. * namespace's tree
  1445. */
  1446. static int do_new_mount(struct path *path, char *type, int flags,
  1447. int mnt_flags, char *name, void *data)
  1448. {
  1449. struct vfsmount *mnt;
  1450. if (!type)
  1451. return -EINVAL;
  1452. /* we need capabilities... */
  1453. if (!capable(CAP_SYS_ADMIN))
  1454. return -EPERM;
  1455. lock_kernel();
  1456. mnt = do_kern_mount(type, flags, name, data);
  1457. unlock_kernel();
  1458. if (IS_ERR(mnt))
  1459. return PTR_ERR(mnt);
  1460. return do_add_mount(mnt, path, mnt_flags, NULL);
  1461. }
  1462. /*
  1463. * add a mount into a namespace's mount tree
  1464. * - provide the option of adding the new mount to an expiration list
  1465. */
  1466. int do_add_mount(struct vfsmount *newmnt, struct path *path,
  1467. int mnt_flags, struct list_head *fslist)
  1468. {
  1469. int err;
  1470. down_write(&namespace_sem);
  1471. /* Something was mounted here while we slept */
  1472. while (d_mountpoint(path->dentry) &&
  1473. follow_down(path))
  1474. ;
  1475. err = -EINVAL;
  1476. if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
  1477. goto unlock;
  1478. /* Refuse the same filesystem on the same mount point */
  1479. err = -EBUSY;
  1480. if (path->mnt->mnt_sb == newmnt->mnt_sb &&
  1481. path->mnt->mnt_root == path->dentry)
  1482. goto unlock;
  1483. err = -EINVAL;
  1484. if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
  1485. goto unlock;
  1486. newmnt->mnt_flags = mnt_flags;
  1487. if ((err = graft_tree(newmnt, path)))
  1488. goto unlock;
  1489. if (fslist) /* add to the specified expiration list */
  1490. list_add_tail(&newmnt->mnt_expire, fslist);
  1491. up_write(&namespace_sem);
  1492. return 0;
  1493. unlock:
  1494. up_write(&namespace_sem);
  1495. mntput(newmnt);
  1496. return err;
  1497. }
  1498. EXPORT_SYMBOL_GPL(do_add_mount);
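
/*
 * Example (illustrative sketch, not part of this file): an in-kernel
 * filesystem that follows an automount point could attach the new
 * vfsmount with do_add_mount() and queue it on a private expiration
 * list so that unused submounts can be reaped later.  The list name
 * and the surrounding function are hypothetical:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	static int example_follow_automount(struct vfsmount *newmnt,
 *					    struct path *path)
 *	{
 *		return do_add_mount(newmnt, path, MNT_SHRINKABLE,
 *				    &example_automount_list);
 *	}
 */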
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
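
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * using the expiration list above would typically call
 * mark_mounts_for_expiry() from a periodically rescheduled work item.
 * A mount survives the first pass (which only sets its expiry mark)
 * and is unmounted on the next pass if still unused.  Names here are
 * hypothetical:
 *
 *	static void example_expiry_worker(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&example_automount_list);
 *	}
 */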
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
					     mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user *from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user *data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (i.e. read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
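
/*
 * Example (illustrative sketch, not part of this file): the dispatch
 * above means distinct mount(2) invocations from userspace reach
 * different helpers.  The paths and filesystem type are hypothetical:
 *
 *	mount("/dev/sda1", "/mnt", "ext3", MS_RDONLY, NULL);	-> do_new_mount()
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT, NULL);		-> do_remount()
 *	mount("/mnt", "/mnt2", NULL, MS_BIND, NULL);		-> do_loopback()
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);		-> do_change_type()
 *	mount("/mnt", "/mnt2", NULL, MS_MOVE, NULL);		-> do_move_mount()
 */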
static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
					struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
				 CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
				  struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}

/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
{
	struct mnt_namespace *new_ns;

	new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&new_ns->list, &new_ns->root->mnt_list);
	}
	return new_ns;
}

EXPORT_SYMBOL(create_mnt_ns);
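
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * needs a private namespace around a kernel-internal mount can combine
 * do_kern_mount() with create_mnt_ns(), as init_mount_tree() below does
 * for rootfs.  The filesystem type here is hypothetical:
 *
 *	struct vfsmount *mnt = do_kern_mount("examplefs", 0, "examplefs", NULL);
 *	struct mnt_namespace *ns;
 *
 *	if (!IS_ERR(mnt)) {
 *		ns = create_mnt_ns(mnt);
 *		if (IS_ERR(ns))
 *			mntput(mnt);
 *	}
 */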
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		       (void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root.  The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root.  No other
 * file system may be mounted on put_old.  After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point.  It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct vfsmount *tmp;
	struct path new, old, parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new.mnt))
		goto out1;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error) {
		path_put(&old);
		goto out1;
	}

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old.mnt) ||
	    IS_MNT_SHARED(new.mnt->mnt_parent) ||
	    IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new.dentry->d_inode))
		goto out2;
	if (d_unlinked(new.dentry))
		goto out2;
	if (d_unlinked(old.dentry))
		goto out2;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
	    old.mnt == root.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out2; /* not a mountpoint */
	if (new.mnt->mnt_parent == new.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old.mnt;
	spin_lock(&vfsmount_lock);
	if (tmp != new.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
			goto out3;
	} else if (!is_subdir(old.dentry, new.dentry))
		goto out3;
	detach_mnt(new.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old);
	/* mount new_root on / */
	attach_mnt(new.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&root, &new);
	security_sb_post_pivotroot(&root, &new);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
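
/*
 * Example (illustrative sketch, not part of this file): the classic
 * switch-root sequence performed from early userspace satisfies the
 * restrictions documented above.  Paths and filesystem type are
 * hypothetical, and error handling is omitted:
 *
 *	mount("/dev/sda1", "/new_root", "ext3", 0, NULL);
 *	mkdir("/new_root/old_root", 0700);
 *	pivot_root("/new_root", "/new_root/old_root");
 *	chdir("/");
 *	umount2("/old_root", MNT_DETACH);
 */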
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
				      0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
		       __func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	struct vfsmount *root;
	LIST_HEAD(umount_list);

	if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
		return;
	root = ns->root;
	ns->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}

EXPORT_SYMBOL(put_mnt_ns);