  1. /*
  2. * linux/fs/proc/inode.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. */
  6. #include <linux/time.h>
  7. #include <linux/proc_fs.h>
  8. #include <linux/kernel.h>
  9. #include <linux/pid_namespace.h>
  10. #include <linux/mm.h>
  11. #include <linux/string.h>
  12. #include <linux/stat.h>
  13. #include <linux/completion.h>
  14. #include <linux/poll.h>
  15. #include <linux/printk.h>
  16. #include <linux/file.h>
  17. #include <linux/limits.h>
  18. #include <linux/init.h>
  19. #include <linux/module.h>
  20. #include <linux/sysctl.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/slab.h>
  23. #include <linux/mount.h>
  24. #include <asm/uaccess.h>
  25. #include "internal.h"
  26. static void proc_evict_inode(struct inode *inode)
  27. {
  28. struct proc_dir_entry *de;
  29. struct ctl_table_header *head;
  30. const struct proc_ns_operations *ns_ops;
  31. void *ns;
  32. truncate_inode_pages(&inode->i_data, 0);
  33. clear_inode(inode);
  34. /* Stop tracking associated processes */
  35. put_pid(PROC_I(inode)->pid);
  36. /* Let go of any associated proc directory entry */
  37. de = PROC_I(inode)->pde;
  38. if (de)
  39. pde_put(de);
  40. head = PROC_I(inode)->sysctl;
  41. if (head) {
  42. rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
  43. sysctl_head_put(head);
  44. }
  45. /* Release any associated namespace */
  46. ns_ops = PROC_I(inode)->ns_ops;
  47. ns = PROC_I(inode)->ns;
  48. if (ns_ops && ns)
  49. ns_ops->put(ns);
  50. }
/* Slab cache backing every struct proc_inode; created in proc_init_inodecache(). */
static struct kmem_cache * proc_inode_cachep;
  52. static struct inode *proc_alloc_inode(struct super_block *sb)
  53. {
  54. struct proc_inode *ei;
  55. struct inode *inode;
  56. ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
  57. if (!ei)
  58. return NULL;
  59. ei->pid = NULL;
  60. ei->fd = 0;
  61. ei->op.proc_get_link = NULL;
  62. ei->pde = NULL;
  63. ei->sysctl = NULL;
  64. ei->sysctl_entry = NULL;
  65. ei->ns = NULL;
  66. ei->ns_ops = NULL;
  67. inode = &ei->vfs_inode;
  68. inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
  69. return inode;
  70. }
  71. static void proc_i_callback(struct rcu_head *head)
  72. {
  73. struct inode *inode = container_of(head, struct inode, i_rcu);
  74. kmem_cache_free(proc_inode_cachep, PROC_I(inode));
  75. }
/*
 * super_operations->destroy_inode: defer the actual free to
 * proc_i_callback() after an RCU grace period.
 */
static void proc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, proc_i_callback);
}
  80. static void init_once(void *foo)
  81. {
  82. struct proc_inode *ei = (struct proc_inode *) foo;
  83. inode_init_once(&ei->vfs_inode);
  84. }
  85. void __init proc_init_inodecache(void)
  86. {
  87. proc_inode_cachep = kmem_cache_create("proc_inode_cache",
  88. sizeof(struct proc_inode),
  89. 0, (SLAB_RECLAIM_ACCOUNT|
  90. SLAB_MEM_SPREAD|SLAB_PANIC),
  91. init_once);
  92. }
  93. static int proc_show_options(struct seq_file *seq, struct dentry *root)
  94. {
  95. struct super_block *sb = root->d_sb;
  96. struct pid_namespace *pid = sb->s_fs_info;
  97. if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
  98. seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
  99. if (pid->hide_pid != 0)
  100. seq_printf(seq, ",hidepid=%u", pid->hide_pid);
  101. return 0;
  102. }
/* Superblock operations for every procfs mount. */
static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.remount_fs	= proc_remount,
	.show_options	= proc_show_options,
};
/*
 * Added to pde->in_use by proc_entry_rundown() to drive the count
 * negative, so use_pde() (atomic_inc_unless_negative) refuses new users.
 */
enum {BIAS = -1U<<31};
/*
 * Take a use reference on @pde.  Returns 0 (and takes no reference)
 * once proc_entry_rundown() has biased the count negative.
 */
static inline int use_pde(struct proc_dir_entry *pde)
{
	return atomic_inc_unless_negative(&pde->in_use);
}
/*
 * Drop a use reference.  If this was the last user after the BIAS was
 * applied, wake the waiter in proc_entry_rundown().
 */
static void unuse_pde(struct proc_dir_entry *pde)
{
	if (atomic_dec_return(&pde->in_use) == BIAS)
		complete(pde->pde_unload_completion);
}
/*
 * Close one tracked opener of @pde, invoking the real ->release() by
 * hand.  Called with pde->pde_unload_lock held; the lock is dropped and
 * retaken around the (possibly sleeping) ->release() call, and while
 * waiting for a concurrent closer, so callers must not assume it stays
 * held continuously.
 */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
		spin_lock(&pde->pde_unload_lock);
	} else {
		struct file *file;
		/* mark ourselves as the closer before dropping the lock */
		pdeo->closing = 1;
		spin_unlock(&pde->pde_unload_lock);
		file = pdeo->file;
		pde->proc_fops->release(file_inode(file), file);
		spin_lock(&pde->pde_unload_lock);
		list_del_init(&pdeo->lh);
		/* wake anyone who waited for this close to finish */
		if (pdeo->c)
			complete(pdeo->c);
		kfree(pdeo);
	}
}
/*
 * Shut down @de prior to removal: bias the use count so use_pde()
 * starts failing, wait for all in-flight method calls to drain, then
 * force-close every remaining opener so module code is never entered
 * after the entry is gone.
 */
void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);
	spin_lock(&de->pde_unload_lock);
	/* close_pdeo() unlinks the entry, so this loop terminates */
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		close_pdeo(de, pdeo);
	}
	spin_unlock(&de->pde_unload_lock);
}
  160. static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
  161. {
  162. struct proc_dir_entry *pde = PDE(file_inode(file));
  163. loff_t rv = -EINVAL;
  164. if (use_pde(pde)) {
  165. loff_t (*llseek)(struct file *, loff_t, int);
  166. llseek = pde->proc_fops->llseek;
  167. if (!llseek)
  168. llseek = default_llseek;
  169. rv = llseek(file, offset, whence);
  170. unuse_pde(pde);
  171. }
  172. return rv;
  173. }
  174. static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  175. {
  176. ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
  177. struct proc_dir_entry *pde = PDE(file_inode(file));
  178. ssize_t rv = -EIO;
  179. if (use_pde(pde)) {
  180. read = pde->proc_fops->read;
  181. if (read)
  182. rv = read(file, buf, count, ppos);
  183. unuse_pde(pde);
  184. }
  185. return rv;
  186. }
  187. static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
  188. {
  189. ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
  190. struct proc_dir_entry *pde = PDE(file_inode(file));
  191. ssize_t rv = -EIO;
  192. if (use_pde(pde)) {
  193. write = pde->proc_fops->write;
  194. if (write)
  195. rv = write(file, buf, count, ppos);
  196. unuse_pde(pde);
  197. }
  198. return rv;
  199. }
  200. static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
  201. {
  202. struct proc_dir_entry *pde = PDE(file_inode(file));
  203. unsigned int rv = DEFAULT_POLLMASK;
  204. unsigned int (*poll)(struct file *, struct poll_table_struct *);
  205. if (use_pde(pde)) {
  206. poll = pde->proc_fops->poll;
  207. if (poll)
  208. rv = poll(file, pts);
  209. unuse_pde(pde);
  210. }
  211. return rv;
  212. }
  213. static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  214. {
  215. struct proc_dir_entry *pde = PDE(file_inode(file));
  216. long rv = -ENOTTY;
  217. long (*ioctl)(struct file *, unsigned int, unsigned long);
  218. if (use_pde(pde)) {
  219. ioctl = pde->proc_fops->unlocked_ioctl;
  220. if (ioctl)
  221. rv = ioctl(file, cmd, arg);
  222. unuse_pde(pde);
  223. }
  224. return rv;
  225. }
  226. #ifdef CONFIG_COMPAT
  227. static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  228. {
  229. struct proc_dir_entry *pde = PDE(file_inode(file));
  230. long rv = -ENOTTY;
  231. long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
  232. if (use_pde(pde)) {
  233. compat_ioctl = pde->proc_fops->compat_ioctl;
  234. if (compat_ioctl)
  235. rv = compat_ioctl(file, cmd, arg);
  236. unuse_pde(pde);
  237. }
  238. return rv;
  239. }
  240. #endif
  241. static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
  242. {
  243. struct proc_dir_entry *pde = PDE(file_inode(file));
  244. int rv = -EIO;
  245. int (*mmap)(struct file *, struct vm_area_struct *);
  246. if (use_pde(pde)) {
  247. mmap = pde->proc_fops->mmap;
  248. if (mmap)
  249. rv = mmap(file, vma);
  250. unuse_pde(pde);
  251. }
  252. return rv;
  253. }
/*
 * open wrapper for proc regular files: registers the opener on
 * pde->pde_openers so a later remove_proc_entry() can run ->release()
 * on our behalf.  Returns -ENOMEM, -ENOENT (entry being removed), or
 * the real ->open()'s result.
 */
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;
	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kzalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;
	/* Entry already going away?  Fail the open. */
	if (!use_pde(pde)) {
		kfree(pdeo);
		return -ENOENT;
	}
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;
	if (open)
		rv = open(inode, file);
	/* Only track openers that actually need a ->release call. */
	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		spin_lock(&pde->pde_unload_lock);
		list_add(&pdeo->lh, &pde->pde_openers);
		spin_unlock(&pde->pde_unload_lock);
	} else
		kfree(pdeo);
	unuse_pde(pde);
	return rv;
}
/*
 * release wrapper: find this file's pde_opener and run the real
 * ->release() via close_pdeo().  If proc_entry_rundown() already closed
 * it, the list search finds nothing and this is a no-op.
 */
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;
	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			/* close_pdeo() may drop and retake the lock, and
			 * frees pdeo — hence the immediate break. */
			close_pdeo(pde, pdeo);
			break;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}
/*
 * file_operations installed on regular proc files: every method pins
 * the proc_dir_entry via use_pde() before calling into the module's
 * real file_operations, so entries can be removed at any time.
 */
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#ifdef CONFIG_COMPAT
/*
 * Variant of proc_reg_file_ops with no ->compat_ioctl: used by
 * proc_get_inode() when the underlying proc_fops provides none, so the
 * wrapper is never reached for compat ioctls.
 */
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif
  333. struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
  334. {
  335. struct inode *inode = new_inode_pseudo(sb);
  336. if (inode) {
  337. inode->i_ino = de->low_ino;
  338. inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
  339. PROC_I(inode)->pde = de;
  340. if (de->mode) {
  341. inode->i_mode = de->mode;
  342. inode->i_uid = de->uid;
  343. inode->i_gid = de->gid;
  344. }
  345. if (de->size)
  346. inode->i_size = de->size;
  347. if (de->nlink)
  348. set_nlink(inode, de->nlink);
  349. WARN_ON(!de->proc_iops);
  350. inode->i_op = de->proc_iops;
  351. if (de->proc_fops) {
  352. if (S_ISREG(inode->i_mode)) {
  353. #ifdef CONFIG_COMPAT
  354. if (!de->proc_fops->compat_ioctl)
  355. inode->i_fop =
  356. &proc_reg_file_ops_no_compat;
  357. else
  358. #endif
  359. inode->i_fop = &proc_reg_file_ops;
  360. } else {
  361. inode->i_fop = de->proc_fops;
  362. }
  363. }
  364. } else
  365. pde_put(de);
  366. return inode;
  367. }
  368. int proc_fill_super(struct super_block *s)
  369. {
  370. struct inode *root_inode;
  371. s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
  372. s->s_blocksize = 1024;
  373. s->s_blocksize_bits = 10;
  374. s->s_magic = PROC_SUPER_MAGIC;
  375. s->s_op = &proc_sops;
  376. s->s_time_gran = 1;
  377. pde_get(&proc_root);
  378. root_inode = proc_get_inode(s, &proc_root);
  379. if (!root_inode) {
  380. pr_err("proc_fill_super: get root inode failed\n");
  381. return -ENOMEM;
  382. }
  383. s->s_root = d_make_root(root_inode);
  384. if (!s->s_root) {
  385. pr_err("proc_fill_super: allocate dentry failed\n");
  386. return -ENOMEM;
  387. }
  388. return proc_setup_self(s);
  389. }