/*
 * fs_struct.c - management of the per-task fs_struct (root, pwd, umask).
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
  6. /*
  7. * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
  8. * It can block.
  9. */
  10. void set_fs_root(struct fs_struct *fs, struct path *path)
  11. {
  12. struct path old_root;
  13. write_lock(&fs->lock);
  14. old_root = fs->root;
  15. fs->root = *path;
  16. path_get(path);
  17. write_unlock(&fs->lock);
  18. if (old_root.dentry)
  19. path_put(&old_root);
  20. }
  21. /*
  22. * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
  23. * It can block.
  24. */
  25. void set_fs_pwd(struct fs_struct *fs, struct path *path)
  26. {
  27. struct path old_pwd;
  28. write_lock(&fs->lock);
  29. old_pwd = fs->pwd;
  30. fs->pwd = *path;
  31. path_get(path);
  32. write_unlock(&fs->lock);
  33. if (old_pwd.dentry)
  34. path_put(&old_pwd);
  35. }
/*
 * Walk every thread in the system and, for each fs_struct whose root
 * and/or pwd is exactly *old_root (both mnt and dentry must match),
 * replace it with *new_root.  A reference to new_root is taken for each
 * replacement; the matching references to old_root are dropped only
 * after the tasklist walk, since path_put may block and must not run
 * under tasklist_lock or task_lock.
 */
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;	/* number of old_root references we now owe back */

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);	/* stabilize p->fs */
		fs = p->fs;
		if (fs) {
			write_lock(&fs->lock);
			/* swap the root if it points at the old root path */
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get(new_root);
				fs->root = *new_root;
				count++;
			}
			/* same test, independently, for the cwd */
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	/* drop the displaced references now that no locks are held */
	while (count--)
		path_put(old_root);
}
  67. void put_fs_struct(struct fs_struct *fs)
  68. {
  69. /* No need to hold fs->lock if we are killing it */
  70. if (atomic_dec_and_test(&fs->count)) {
  71. path_put(&fs->root);
  72. path_put(&fs->pwd);
  73. kmem_cache_free(fs_cachep, fs);
  74. }
  75. }
  76. void exit_fs(struct task_struct *tsk)
  77. {
  78. struct fs_struct * fs = tsk->fs;
  79. if (fs) {
  80. task_lock(tsk);
  81. tsk->fs = NULL;
  82. task_unlock(tsk);
  83. put_fs_struct(fs);
  84. }
  85. }
  86. struct fs_struct *copy_fs_struct(struct fs_struct *old)
  87. {
  88. struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
  89. /* We don't need to lock fs - think why ;-) */
  90. if (fs) {
  91. atomic_set(&fs->count, 1);
  92. rwlock_init(&fs->lock);
  93. fs->umask = old->umask;
  94. read_lock(&old->lock);
  95. fs->root = old->root;
  96. path_get(&old->root);
  97. fs->pwd = old->pwd;
  98. path_get(&old->pwd);
  99. read_unlock(&old->lock);
  100. }
  101. return fs;
  102. }
  103. int unshare_fs_struct(void)
  104. {
  105. struct fs_struct *fsp = copy_fs_struct(current->fs);
  106. if (!fsp)
  107. return -ENOMEM;
  108. exit_fs(current);
  109. current->fs = fsp;
  110. return 0;
  111. }
  112. EXPORT_SYMBOL_GPL(unshare_fs_struct);
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.count	= ATOMIC_INIT(1),	/* initial reference held by INIT_TASK */
	.lock	= __RW_LOCK_UNLOCKED(init_fs.lock),
	.umask	= 0022,			/* default file-creation mask */
};
  119. void daemonize_fs_struct(void)
  120. {
  121. struct fs_struct *fs;
  122. exit_fs(current); /* current->fs->count--; */
  123. fs = &init_fs;
  124. current->fs = fs;
  125. atomic_inc(&fs->count);
  126. }