  1. /*
  2. * linux/fs/proc/inode.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. */
  6. #include <linux/time.h>
  7. #include <linux/proc_fs.h>
  8. #include <linux/kernel.h>
  9. #include <linux/pid_namespace.h>
  10. #include <linux/mm.h>
  11. #include <linux/string.h>
  12. #include <linux/stat.h>
  13. #include <linux/completion.h>
  14. #include <linux/poll.h>
  15. #include <linux/printk.h>
  16. #include <linux/file.h>
  17. #include <linux/limits.h>
  18. #include <linux/init.h>
  19. #include <linux/module.h>
  20. #include <linux/sysctl.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/slab.h>
  23. #include <linux/mount.h>
  24. #include <linux/magic.h>
  25. #include <asm/uaccess.h>
  26. #include "internal.h"
/*
 * Tear down the proc-specific state attached to an inode as it is
 * evicted: drop the pid reference, the proc_dir_entry reference, any
 * sysctl header, and any namespace reference.
 */
static void proc_evict_inode(struct inode *inode)
{
	struct proc_dir_entry *de;
	struct ctl_table_header *head;
	const struct proc_ns_operations *ns_ops;
	void *ns;

	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de)
		pde_put(de);
	head = PROC_I(inode)->sysctl;
	if (head) {
		/* Publish NULL with RCU semantics before dropping the
		 * header, so lockless readers never see a stale pointer. */
		rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
		sysctl_head_put(head);
	}
	/* Release any associated namespace */
	ns_ops = PROC_I(inode)->ns.ns_ops;
	ns = PROC_I(inode)->ns.ns;
	if (ns_ops && ns)
		ns_ops->put(ns);
}
/* Slab cache backing all proc inodes; created in proc_init_inodecache(). */
static struct kmem_cache * proc_inode_cachep;
  53. static struct inode *proc_alloc_inode(struct super_block *sb)
  54. {
  55. struct proc_inode *ei;
  56. struct inode *inode;
  57. ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
  58. if (!ei)
  59. return NULL;
  60. ei->pid = NULL;
  61. ei->fd = 0;
  62. ei->op.proc_get_link = NULL;
  63. ei->pde = NULL;
  64. ei->sysctl = NULL;
  65. ei->sysctl_entry = NULL;
  66. ei->ns.ns = NULL;
  67. ei->ns.ns_ops = NULL;
  68. inode = &ei->vfs_inode;
  69. inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
  70. return inode;
  71. }
  72. static void proc_i_callback(struct rcu_head *head)
  73. {
  74. struct inode *inode = container_of(head, struct inode, i_rcu);
  75. kmem_cache_free(proc_inode_cachep, PROC_I(inode));
  76. }
/* Defer the actual free to an RCU grace period (see proc_i_callback). */
static void proc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, proc_i_callback);
}
  81. static void init_once(void *foo)
  82. {
  83. struct proc_inode *ei = (struct proc_inode *) foo;
  84. inode_init_once(&ei->vfs_inode);
  85. }
  86. void __init proc_init_inodecache(void)
  87. {
  88. proc_inode_cachep = kmem_cache_create("proc_inode_cache",
  89. sizeof(struct proc_inode),
  90. 0, (SLAB_RECLAIM_ACCOUNT|
  91. SLAB_MEM_SPREAD|SLAB_PANIC),
  92. init_once);
  93. }
  94. static int proc_show_options(struct seq_file *seq, struct dentry *root)
  95. {
  96. struct super_block *sb = root->d_sb;
  97. struct pid_namespace *pid = sb->s_fs_info;
  98. if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
  99. seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
  100. if (pid->hide_pid != 0)
  101. seq_printf(seq, ",hidepid=%u", pid->hide_pid);
  102. return 0;
  103. }
/* Superblock operations wired into every procfs super block. */
static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,	/* do not cache proc inodes */
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.remount_fs	= proc_remount,
	.show_options	= proc_show_options,
};
/*
 * Added to pde->in_use when an entry is being removed: it drives the
 * count negative, so use_pde() refuses any new users.
 */
enum {BIAS = -1U<<31};
/* Take a use reference; fails (returns 0) once removal has begun. */
static inline int use_pde(struct proc_dir_entry *pde)
{
	return atomic_inc_unless_negative(&pde->in_use);
}
/*
 * Drop a use reference.  When removal has started and this was the last
 * user (count back at BIAS), wake up proc_entry_rundown().
 */
static void unuse_pde(struct proc_dir_entry *pde)
{
	if (atomic_dec_return(&pde->in_use) == BIAS)
		complete(pde->pde_unload_completion);
}
/*
 * Close one tracked opener of a proc entry, calling its ->release() by
 * hand.  Called with pde->pde_unload_lock held; the lock is dropped and
 * retaken around the blocking operations.
 */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
		spin_lock(&pde->pde_unload_lock);
	} else {
		struct file *file;
		/* Mark it ours so concurrent closers wait instead. */
		pdeo->closing = 1;
		spin_unlock(&pde->pde_unload_lock);
		file = pdeo->file;
		/* ->release() may sleep, hence unlocked. */
		pde->proc_fops->release(file_inode(file), file);
		spin_lock(&pde->pde_unload_lock);
		list_del_init(&pdeo->lh);
		if (pdeo->c)
			complete(pdeo->c);
		kfree(pdeo);
	}
}
/*
 * Run down a proc entry that is being removed: refuse new users, wait
 * for in-flight method calls to drain, then force-close every remaining
 * opener so no module code can be entered after removal.
 */
void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	/* Adding BIAS turns the count negative; anything other than BIAS
	 * means users are still inside — the last unuse_pde() completes c. */
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);
	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		/* may drop and retake pde_unload_lock internally */
		close_pdeo(de, pdeo);
	}
	spin_unlock(&de->pde_unload_lock);
}
  161. static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
  162. {
  163. struct proc_dir_entry *pde = PDE(file_inode(file));
  164. loff_t rv = -EINVAL;
  165. if (use_pde(pde)) {
  166. loff_t (*llseek)(struct file *, loff_t, int);
  167. llseek = pde->proc_fops->llseek;
  168. if (!llseek)
  169. llseek = default_llseek;
  170. rv = llseek(file, offset, whence);
  171. unuse_pde(pde);
  172. }
  173. return rv;
  174. }
  175. static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  176. {
  177. ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
  178. struct proc_dir_entry *pde = PDE(file_inode(file));
  179. ssize_t rv = -EIO;
  180. if (use_pde(pde)) {
  181. read = pde->proc_fops->read;
  182. if (read)
  183. rv = read(file, buf, count, ppos);
  184. unuse_pde(pde);
  185. }
  186. return rv;
  187. }
  188. static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
  189. {
  190. ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
  191. struct proc_dir_entry *pde = PDE(file_inode(file));
  192. ssize_t rv = -EIO;
  193. if (use_pde(pde)) {
  194. write = pde->proc_fops->write;
  195. if (write)
  196. rv = write(file, buf, count, ppos);
  197. unuse_pde(pde);
  198. }
  199. return rv;
  200. }
  201. static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
  202. {
  203. struct proc_dir_entry *pde = PDE(file_inode(file));
  204. unsigned int rv = DEFAULT_POLLMASK;
  205. unsigned int (*poll)(struct file *, struct poll_table_struct *);
  206. if (use_pde(pde)) {
  207. poll = pde->proc_fops->poll;
  208. if (poll)
  209. rv = poll(file, pts);
  210. unuse_pde(pde);
  211. }
  212. return rv;
  213. }
  214. static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  215. {
  216. struct proc_dir_entry *pde = PDE(file_inode(file));
  217. long rv = -ENOTTY;
  218. long (*ioctl)(struct file *, unsigned int, unsigned long);
  219. if (use_pde(pde)) {
  220. ioctl = pde->proc_fops->unlocked_ioctl;
  221. if (ioctl)
  222. rv = ioctl(file, cmd, arg);
  223. unuse_pde(pde);
  224. }
  225. return rv;
  226. }
  227. #ifdef CONFIG_COMPAT
  228. static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  229. {
  230. struct proc_dir_entry *pde = PDE(file_inode(file));
  231. long rv = -ENOTTY;
  232. long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
  233. if (use_pde(pde)) {
  234. compat_ioctl = pde->proc_fops->compat_ioctl;
  235. if (compat_ioctl)
  236. rv = compat_ioctl(file, cmd, arg);
  237. unuse_pde(pde);
  238. }
  239. return rv;
  240. }
  241. #endif
  242. static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
  243. {
  244. struct proc_dir_entry *pde = PDE(file_inode(file));
  245. int rv = -EIO;
  246. int (*mmap)(struct file *, struct vm_area_struct *);
  247. if (use_pde(pde)) {
  248. mmap = pde->proc_fops->mmap;
  249. if (mmap)
  250. rv = mmap(file, vma);
  251. unuse_pde(pde);
  252. }
  253. return rv;
  254. }
/*
 * Open wrapper for regular proc files.  When the entry has a ->release,
 * the opener is recorded on pde->pde_openers so proc_entry_rundown()
 * can invoke ->release by hand if the entry is removed before close.
 */
static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kzalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;

	/* Entry already being removed: behave as if it never existed. */
	if (!use_pde(pde)) {
		kfree(pdeo);
		return -ENOENT;
	}
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;

	if (open)
		rv = open(inode, file);

	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		spin_lock(&pde->pde_unload_lock);
		list_add(&pdeo->lh, &pde->pde_openers);
		spin_unlock(&pde->pde_unload_lock);
	} else
		kfree(pdeo);

	unuse_pde(pde);
	return rv;
}
/*
 * Release wrapper: find this file's tracked opener, if any, and run its
 * ->release() via close_pdeo() under the pde_unload_lock protocol.
 * If proc_entry_rundown() already closed it, the list is empty and this
 * is a no-op.
 */
static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;

	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			/* may drop and retake pde_unload_lock internally */
			close_pdeo(pde, pdeo);
			break;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}
/*
 * Wrapped file operations installed on regular /proc files: each method
 * pins the pde with use_pde()/unuse_pde() around the real callback.
 */
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#ifdef CONFIG_COMPAT
/*
 * Same as proc_reg_file_ops but with no ->compat_ioctl: used when the
 * underlying entry provides none, so the VFS falls back correctly.
 */
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif
/*
 * Build a VFS inode for a proc_dir_entry.  Consumes the caller's pde
 * reference: on success it is stored in the inode (dropped later by
 * proc_evict_inode()); on allocation failure it is released here via
 * pde_put().  Returns NULL on failure.
 */
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode_pseudo(sb);

	if (inode) {
		inode->i_ino = de->low_ino;
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->pde = de;

		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			set_nlink(inode, de->nlink);
		WARN_ON(!de->proc_iops);
		inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
				/* Regular files get wrappers that pin the
				 * pde across each file operation. */
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
	} else
		pde_put(de);
	return inode;
}
/*
 * Fill in a procfs super block: basic parameters, the root inode for
 * &proc_root and its dentry, plus the /proc/self entry.
 * Returns 0 on success or a negative errno.
 */
int proc_fill_super(struct super_block *s)
{
	struct inode *root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	/* Reference consumed by proc_get_inode(), even on its failure. */
	pde_get(&proc_root);
	root_inode = proc_get_inode(s, &proc_root);
	if (!root_inode) {
		pr_err("proc_fill_super: get root inode failed\n");
		return -ENOMEM;
	}

	s->s_root = d_make_root(root_inode);
	if (!s->s_root) {
		/* NOTE(review): relies on d_make_root() releasing the inode
		 * on failure — confirm against the dcache API. */
		pr_err("proc_fill_super: allocate dentry failed\n");
		return -ENOMEM;
	}

	return proc_setup_self(s);
}