/* fs/fs_struct.c — management of each task's fs_struct (root, pwd, umask) */
  1. #include <linux/module.h>
  2. #include <linux/sched.h>
  3. #include <linux/fs.h>
  4. #include <linux/path.h>
  5. #include <linux/slab.h>
  6. #include <linux/fs_struct.h>
  7. /*
  8. * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
  9. * It can block.
  10. */
  11. void set_fs_root(struct fs_struct *fs, struct path *path)
  12. {
  13. struct path old_root;
  14. spin_lock(&fs->lock);
  15. write_seqcount_begin(&fs->seq);
  16. old_root = fs->root;
  17. fs->root = *path;
  18. path_get_long(path);
  19. write_seqcount_end(&fs->seq);
  20. spin_unlock(&fs->lock);
  21. if (old_root.dentry)
  22. path_put_long(&old_root);
  23. }
  24. /*
  25. * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
  26. * It can block.
  27. */
  28. void set_fs_pwd(struct fs_struct *fs, struct path *path)
  29. {
  30. struct path old_pwd;
  31. spin_lock(&fs->lock);
  32. write_seqcount_begin(&fs->seq);
  33. old_pwd = fs->pwd;
  34. fs->pwd = *path;
  35. path_get_long(path);
  36. write_seqcount_end(&fs->seq);
  37. spin_unlock(&fs->lock);
  38. if (old_pwd.dentry)
  39. path_put_long(&old_pwd);
  40. }
/*
 * Walk every thread in the system and, wherever a task's fs->root or
 * fs->pwd still matches @old_root (same dentry AND same vfsmount),
 * repoint it at @new_root.
 *
 * Reference accounting: a long-term reference on @new_root is taken for
 * each replacement while the locks are held; @count remembers how many
 * references on @old_root were displaced, and they are all put after
 * the tasklist walk — path_put_long() may block, so it must not run
 * under tasklist_lock or fs->lock.
 *
 * NOTE(review): presumably called when the old root goes away (e.g. on
 * pivot_root) — confirm against the callers, which are outside this file.
 */
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			/* Writers of root/pwd bump the seqcount so lockless
			 * path walkers retry instead of seeing a torn path. */
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->root = *new_root;
				count++;	/* one old_root ref displaced */
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->pwd = *new_root;
				count++;	/* one old_root ref displaced */
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	/* Now that no locks are held, drop the displaced references. */
	while (count--)
		path_put_long(old_root);
}
  74. void free_fs_struct(struct fs_struct *fs)
  75. {
  76. path_put_long(&fs->root);
  77. path_put_long(&fs->pwd);
  78. kmem_cache_free(fs_cachep, fs);
  79. }
  80. void exit_fs(struct task_struct *tsk)
  81. {
  82. struct fs_struct *fs = tsk->fs;
  83. if (fs) {
  84. int kill;
  85. task_lock(tsk);
  86. spin_lock(&fs->lock);
  87. write_seqcount_begin(&fs->seq);
  88. tsk->fs = NULL;
  89. kill = !--fs->users;
  90. write_seqcount_end(&fs->seq);
  91. spin_unlock(&fs->lock);
  92. task_unlock(tsk);
  93. if (kill)
  94. free_fs_struct(fs);
  95. }
  96. }
  97. struct fs_struct *copy_fs_struct(struct fs_struct *old)
  98. {
  99. struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
  100. /* We don't need to lock fs - think why ;-) */
  101. if (fs) {
  102. fs->users = 1;
  103. fs->in_exec = 0;
  104. spin_lock_init(&fs->lock);
  105. seqcount_init(&fs->seq);
  106. fs->umask = old->umask;
  107. spin_lock(&old->lock);
  108. fs->root = old->root;
  109. path_get_long(&fs->root);
  110. fs->pwd = old->pwd;
  111. path_get_long(&fs->pwd);
  112. spin_unlock(&old->lock);
  113. }
  114. return fs;
  115. }
/*
 * Give the calling task a private copy of its fs_struct, so that later
 * chroot()/chdir()/umask() changes by this task no longer affect other
 * tasks that shared the old one (and vice versa).
 *
 * Returns 0 on success, -ENOMEM if the copy could not be allocated.
 * If the caller was the last user of the old fs_struct, it is freed
 * after the locks are dropped (freeing can block).
 */
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	/* Swap in the private copy under task_lock + fs->lock. */
	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;	/* were we the last user of the old one? */
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);
/* Return the file-mode creation mask (umask) of the current task. */
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);
/*
 * The fs_struct of the init task — to be mentioned only in INIT_TASK.
 * Starts with one user (init itself); root/pwd paths are set up later
 * during boot, which is why only the counters, lock, seqcount and the
 * traditional 0022 umask are initialized here.
 */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO,
	.umask		= 0022,
};
  146. void daemonize_fs_struct(void)
  147. {
  148. struct fs_struct *fs = current->fs;
  149. if (fs) {
  150. int kill;
  151. task_lock(current);
  152. spin_lock(&init_fs.lock);
  153. init_fs.users++;
  154. spin_unlock(&init_fs.lock);
  155. spin_lock(&fs->lock);
  156. current->fs = &init_fs;
  157. kill = !--fs->users;
  158. spin_unlock(&fs->lock);
  159. task_unlock(current);
  160. if (kill)
  161. free_fs_struct(fs);
  162. }
  163. }