/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);
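
/*
 * Usage pattern for vfsmount_lock in this file (illustrative summary, not
 * new API): lookups such as lookup_mnt() bracket their work with
 * br_read_lock(vfsmount_lock)/br_read_unlock(vfsmount_lock), while anything
 * that rewires the hash or the tree takes the br_write_lock/br_write_unlock
 * pair.
 */

/*
 * hash() folds the (mnt, dentry) pair into an index into mount_hashtable.
 * Dividing the pointers by L1_CACHE_BYTES discards low bits that are the
 * same for all cache-line-aligned allocations; the shifted add mixes the
 * high bits back in before masking down to HASH_SIZE buckets.
 */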
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct vfsmount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

static inline void mnt_set_count(struct vfsmount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_write(mnt->mnt_pcp->mnt_count, n);
#else
	mnt->mnt_count = n;
#endif
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_inc_count(struct vfsmount *mnt)
{
	mnt_add_count(mnt, 1);
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_dec_count(struct vfsmount *mnt)
{
	mnt_add_count(mnt, -1);
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = atomic_read(&mnt->mnt_longrefs);
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		atomic_set(&mnt->mnt_longrefs, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success. When
 * the write operation is finished, mnt_drop_write()
 * must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (__mnt_is_readonly(mnt)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
		goto out;
	}
out:
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
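
/*
 * Typical caller pattern (illustrative sketch only; the lookup context and
 * the write operation itself are hypothetical):
 *
 *	err = mnt_want_write(nd.path.mnt);
 *	if (err)
 *		return err;
 *	err = vfs_unlink(dir, dentry);		// the actual write
 *	mnt_drop_write(nd.path.mnt);
 */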
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * When finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(mnt);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(mnt);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	br_write_lock(vfsmount_lock);
	mnt->mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	br_write_unlock(vfsmount_lock);
}
void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
}
EXPORT_SYMBOL(simple_set_mnt);

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 * vfsmount_lock must be held for read or write.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct vfsmount *child_mnt;

	br_read_lock(vfsmount_lock);
	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
		mntget(child_mnt);
	br_read_unlock(vfsmount_lock);
	return child_mnt;
}

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * Clear dentry's mounted state if it has no remaining mounts.
 * vfsmount_lock must be held for write.
 */
static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned u;

	for (u = 0; u < HASH_SIZE; u++) {
		struct vfsmount *p;

		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
			if (p->mnt_mountpoint == dentry)
				return;
		}
	}
	spin_lock(&dentry->d_lock);
	dentry->d_flags &= ~DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}
/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	dentry_reset_mounted(old_path->mnt, old_path->dentry);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}
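
/*
 * Depth-first traversal of the mount tree rooted at @root: descend into the
 * first child if there is one, otherwise climb until some ancestor has an
 * unvisited sibling. Returns NULL when the walk has covered the whole tree
 * under @root.
 */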
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
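
/*
 * Descend to the bottom-most, last-mounted leaf under @p, so that a
 * subsequent next_mnt() step moves past the whole subtree rooted at @p.
 */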
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}
static inline void mntfree(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(mnt);
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}
#ifdef CONFIG_SMP
static inline void __mntput(struct vfsmount *mnt, int longrefs)
{
	if (!longrefs) {
put_again:
		br_read_lock(vfsmount_lock);
		if (likely(atomic_read(&mnt->mnt_longrefs))) {
			mnt_dec_count(mnt);
			br_read_unlock(vfsmount_lock);
			return;
		}
		br_read_unlock(vfsmount_lock);
	} else {
		BUG_ON(!atomic_read(&mnt->mnt_longrefs));
		if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1))
			return;
	}

	br_write_lock(vfsmount_lock);
	if (!longrefs)
		mnt_dec_count(mnt);
	else
		atomic_dec(&mnt->mnt_longrefs);
	if (mnt_get_count(mnt)) {
		br_write_unlock(vfsmount_lock);
		return;
	}
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(vfsmount_lock);
		acct_auto_close_mnt(mnt);
		goto put_again;
	}
	br_write_unlock(vfsmount_lock);
	mntfree(mnt);
}
#else
static inline void __mntput(struct vfsmount *mnt, int longrefs)
{
put_again:
	mnt_dec_count(mnt);
	if (likely(mnt_get_count(mnt)))
		return;
	br_write_lock(vfsmount_lock);
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(vfsmount_lock);
		acct_auto_close_mnt(mnt);
		goto put_again;
	}
	br_write_unlock(vfsmount_lock);
	mntfree(mnt);
}
#endif
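
/*
 * Two reference flavours coexist here: the per-cpu mnt_count taken by
 * mntget()/mntput() for short-lived references, and the atomic mnt_longrefs
 * taken by mntget_long()/mntput_long() for long-lived ones (e.g. a mount
 * held by a namespace). The SMP fast path above can drop a short reference
 * under the br read lock as long as at least one long reference still pins
 * the mount.
 */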
static void mntput_no_expire(struct vfsmount *mnt)
{
	__mntput(mnt, 0);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(mnt->mnt_expiry_mark))
			mnt->mnt_expiry_mark = 0;
		__mntput(mnt, 0);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_inc_count(mnt);
	return mnt;
}
EXPORT_SYMBOL(mntget);
void mntput_long(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	if (mnt) {
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(mnt->mnt_expiry_mark))
			mnt->mnt_expiry_mark = 0;
		__mntput(mnt, 1);
	}
#else
	mntput(mnt);
#endif
}
EXPORT_SYMBOL(mntput_long);

struct vfsmount *mntget_long(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	if (mnt)
		atomic_inc(&mnt->mnt_longrefs);
	return mnt;
#else
	return mntget(mnt);
#endif
}
EXPORT_SYMBOL(mntget_long);
void mnt_pin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt_pinned++;
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	if (mnt->mnt_pinned) {
		mnt_inc_count(mnt);
		mnt->mnt_pinned--;
	}
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
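
/*
 * mangle() octal-escapes the characters that act as field separators in the
 * /proc mount tables, so e.g. a mount point containing a space shows up as
 * "\040" in /proc/mounts.
 */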
/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(mnt->mnt_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);
/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
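
/*
 * Typical filesystem usage (illustrative sketch; the myfs names are
 * hypothetical):
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data,
 *				   int silent)
 *	{
 *		save_mount_options(sb, data);	// remember options for /proc
 *		...
 *	}
 *
 * together with .show_options = generic_show_options in the filesystem's
 * super_operations table.
 */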
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

int mnt_had_events(struct proc_mounts *p)
{
	struct mnt_namespace *ns = p->ns;
	int res = 0;

	br_read_lock(vfsmount_lock);
	if (p->event != ns->event) {
		p->event = ns->event;
		res = 1;
	}
	br_read_unlock(vfsmount_lock);

	return res;
}
struct proc_fs_info {
	int flag;
	const char *str;
};

static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}

static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}
static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};
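
/*
 * show_mountinfo() below emits one /proc/<pid>/mountinfo record per mount.
 * An illustrative line (values hypothetical):
 *
 *	16 1 0:15 / /sys rw,nosuid shared:6 - sysfs sysfs rw
 *
 * i.e. mount id, parent id, major:minor of the superblock, the root of the
 * mount within the filesystem, the mount point, per-mount options, optional
 * tags (shared:/master:/propagate_from:/unbindable), a "-" separator, then
 * filesystem type, source and superblock options.
 */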
static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one. Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	/* write lock needed for mnt_get_count */
	br_write_lock(vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	br_write_unlock(vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	br_write_lock(vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	br_write_unlock(vfsmount_lock);
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;

			br_write_lock(vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			br_write_unlock(vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput_long(mnt);
	}
}
/*
 * vfsmount lock must be held for write
 * namespace_sem must be held for write
 */
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		br_write_lock(vfsmount_lock);
		if (mnt_get_count(mnt) != 2) {
			br_write_unlock(vfsmount_lock);
			return -EBUSY;
		}
		br_write_unlock(vfsmount_lock);

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}
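
/*
 * Userspace view of the syscall above (illustrative sketch; the path is
 * hypothetical):
 *
 *	#include <sys/mount.h>
 *
 *	umount("/mnt/data");			// plain unmount
 *	umount2("/mnt/data", MNT_DETACH);	// lazy: detach now, free later
 */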
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			br_write_lock(vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			br_write_unlock(vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		br_write_lock(vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
struct vfsmount *collect_mounts(struct path *path)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct vfsmount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
		res = f(mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
/*
 * @source_mnt : mount tree to be attached
 * @path       : place the mount tree @source_mnt is attached
 * @parent_path: if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
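/*
 * Illustrative only (not in the original source): the "shared dest,
 * shared source" (++) row of the bind table corresponds to a userspace
 * sequence roughly like:
 *
 *	mount --make-shared /mnt       # destination becomes shared
 *	mount --make-shared /src       # source becomes shared
 *	mount --bind /src /mnt/a       # clone lands in /mnt/a ...
 *
 * ... and via propagate_mnt() a copy also appears under every peer of
 * /mnt, with the new mounts joining the peer group of /src. The slave
 * rows behave the same way except that the clones are enslaved to the
 * source's master instead of joining its peer group.
 */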
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	br_write_lock(vfsmount_lock);

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(parent_path->mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	br_write_unlock(vfsmount_lock);

	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;

	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (cant_mount(path->dentry))
		goto out_unlock;

	if (!d_unlinked(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	return err;
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~MS_REC;

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

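/*
 * Worked example (not in the original source): the helper above maps a
 * raw mount(2) flags word to a single propagation type, e.g.
 *
 *	flags_to_propagation_type(MS_SHARED | MS_REC)    == MS_SHARED
 *	flags_to_propagation_type(MS_PRIVATE)            == MS_PRIVATE
 *	flags_to_propagation_type(MS_SHARED | MS_SLAVE)  == 0  (two bits set)
 *	flags_to_propagation_type(MS_SHARED | MS_RDONLY) == 0  (stray flag)
 *
 * MS_REC is masked off first, so a recursive request still yields the
 * bare type; is_power_of_2() then rejects anything but exactly one bit.
 */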
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}

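/*
 * Illustrative only: userspace reaches this path via mount(2) with one
 * propagation flag and no source or fstype, e.g. (sketch, glibc wrapper
 * assumed):
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 *
 * which is what util-linux issues for "mount --make-rshared /mnt".
 * do_change_type() then walks the tree with next_mnt() and flips each
 * mount's propagation state via change_mnt_propagation().
 */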
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);

	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out;

	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, path);
	if (err) {
		LIST_HEAD(umount_list);

		br_write_lock(vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_path);
	return err;
}

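/*
 * Illustrative only: the recurse argument distinguishes the two bind
 * variants exposed by util-linux, roughly:
 *
 *	mount --bind  /src /dst  ->  mount("/src", "/dst", NULL,
 *					   MS_BIND, NULL)
 *	mount --rbind /src /dst  ->  mount("/src", "/dst", NULL,
 *					   MS_BIND | MS_REC, NULL)
 *
 * The first clones a single mount with clone_mnt(); the second copies
 * the whole subtree with copy_tree() before grafting it at the
 * destination.
 */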
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(path->mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		br_write_lock(vfsmount_lock);
		mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
		path->mnt->mnt_flags = mnt_flags;
		br_write_unlock(vfsmount_lock);
	}
	up_write(&sb->s_umount);
	if (!err) {
		br_write_lock(vfsmount_lock);
		touch_mnt_namespace(path->mnt->mnt_ns);
		br_write_unlock(vfsmount_lock);
	}
	return err;
}

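/*
 * Illustrative only: the MS_BIND branch above is what makes a read-only
 * bind mount possible as a two-step sequence from userspace (sketch):
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 *
 * The second call only adjusts the per-mount flags via
 * change_mount_flags(), without touching the superblock, whereas a
 * plain "mount -o remount,..." (no MS_BIND) goes through
 * do_remount_sb().
 */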
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct vfsmount *p;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(path->dentry) &&
	       follow_down(path))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (cant_mount(path->dentry))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (old_path.mnt == old_path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(path->mnt) &&
	    tree_contains_unbindable(old_path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer expire
	 * automatically */
	list_del_init(&old_path.mnt->mnt_expire);
out1:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type)
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, path, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct path *path,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(path->dentry) &&
	       follow_down(path))
		;
	err = -EINVAL;
	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, path)))
		goto unlock;

	if (fslist) /* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput_long(newmnt);
	return err;
}
EXPORT_SYMBOL_GPL(do_add_mount);

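/*
 * Illustrative only: a filesystem that creates automounted submounts
 * (NFS-style) typically vfsmounts the child itself and hands it to
 * do_add_mount() with MNT_SHRINKABLE and a private expiry list; the
 * names below are hypothetical:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	err = do_add_mount(child_mnt, &nd->path,
 *			   nd->path.mnt->mnt_flags | MNT_SHRINKABLE,
 *			   &example_automount_list);
 *
 * Mounts queued this way are later reaped by
 * mark_mounts_for_expiry(&example_automount_list) if they stay unused.
 */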
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

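/*
 * Illustrative only: because a mount must survive two consecutive
 * passes with its expiry mark set, callers drive this from a periodic
 * worker; a minimal sketch with hypothetical names:
 *
 *	static void example_expiry_work(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&example_automount_list);
 *		schedule_delayed_work(&example_expiry_dwork, 30 * HZ);
 *	}
 *
 * The xchg() on mnt_expiry_mark implements the two-pass rule: the first
 * pass only sets the mark, any mntput() in between clears it, so only
 * mounts untouched for a full interval are actually unmounted.
 */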
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the mnt_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * vfsmount_lock must be held for write
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

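/*
 * Worked example (not in the original source): with n = 100 and a fault
 * at the 41st byte, the loop has decremented n forty times, so the
 * function zeroes the remaining 60 destination bytes and returns
 * exactly 60. copy_mount_options() relies on that exact remainder to
 * compute how many bytes actually arrived (i = size - remainder) and to
 * distinguish "nothing readable" (-EFAULT) from a legitimate short copy
 * near the end of the user's address space.
 */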
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}

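/*
 * Illustrative only: a plain userspace mount that takes the final
 * do_new_mount() branch above, as a sketch:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("tmpfs", "/mnt", "tmpfs",
 *		  MS_NOSUID | MS_NODEV, "size=16m") < 0)
 *		perror("mount");
 *
 * MS_NOSUID and MS_NODEV are split off into per-mountpoint MNT_* flags
 * before dispatch, and the "size=16m" string travels in the data page
 * for the filesystem to parse.
 */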
static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	br_write_lock(vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	br_write_unlock(vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget_long(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget_long(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput_long(rootmnt);
	if (pwdmnt)
		mntput_long(pwdmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}

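/*
 * Illustrative only: userspace reaches this copy via clone(2) or
 * unshare(2) with CLONE_NEWNS, e.g. (sketch):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	if (unshare(CLONE_NEWNS) < 0)
 *		perror("unshare");
 *
 * After the call, dup_mnt_ns() has given the task a private copy of the
 * whole mount tree; the two next_mnt() walks in lockstep are what
 * retarget fs->root and fs->pwd at the corresponding copies.
 */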
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
{
	struct mnt_namespace *new_ns;

	new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&new_ns->list, &new_ns->root->mnt_list);
	}
	return new_ns;
}
EXPORT_SYMBOL(create_mnt_ns);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
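/*
 * Illustrative only: the classic initrd-style handover sequence that
 * satisfies the restrictions above, as a shell sketch (paths are
 * examples):
 *
 *	mount --bind /newroot /newroot   # ensure new_root is a mountpoint
 *	cd /newroot
 *	pivot_root . old_root            # old root now visible at ./old_root
 *	exec chroot . /sbin/init         # reset root/cwd for the caller
 *	umount /old_root                 # once nothing uses the old root
 *
 * put_old ("old_root" here) must already exist under new_root, per the
 * is_subdir() check below.
 */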
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct vfsmount *tmp;
	struct path new, old, parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new.mnt))
		goto out1;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error) {
		path_put(&old);
		goto out1;
	}

	get_fs_root(current->fs, &root);
	down_write(&namespace_sem);
	mutex_lock(&old.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old.mnt) ||
		IS_MNT_SHARED(new.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (cant_mount(old.dentry))
		goto out2;
	if (d_unlinked(new.dentry))
		goto out2;
	if (d_unlinked(old.dentry))
		goto out2;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
		old.mnt == root.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out2; /* not a mountpoint */
	if (new.mnt->mnt_parent == new.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old.mnt;
	br_write_lock(vfsmount_lock);
	if (tmp != new.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
			goto out3;
	} else if (!is_subdir(old.dentry, new.dentry))
		goto out3;
	detach_mnt(new.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old);
	/* mount new_root on / */
	attach_mnt(new.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(vfsmount_lock);
	chroot_fs_refs(&root, &new);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
out3:
	br_write_unlock(vfsmount_lock);
	goto out2;
}

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	br_lock_init(vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	LIST_HEAD(umount_list);

	if (!atomic_dec_and_test(&ns->count))
		return;
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(ns->root, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}
EXPORT_SYMBOL(put_mnt_ns);