/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
static struct inotify_event nul_inotify_event;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
static struct fsnotify_event *inotify_ignored_event;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
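
/*
 * Illustrative sketch (not part of this file): the three tunables registered
 * above surface to userspace as files under /proc/sys/fs/inotify/.  A program
 * could query the current watch limit roughly like this (hypothetical helper,
 * error handling trimmed):
 *
 *	#include <stdio.h>
 *
 *	int read_max_user_watches(void)
 *	{
 *		int val = -1;
 *		FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *
 *		if (f) {
 *			if (fscanf(f, "%d", &val) != 1)
 *				val = -1;
 *			fclose(f);
 *		}
 *		return val;
 *	}
 */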

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* everything should accept their own ignored and cares about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count".  Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	event_size += roundup(event->name_len, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/* round up event->name_len so it is a multiple of event_size */
	name_len = roundup(event->name_len, event_size);
	inotify_event.len = name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.  I get my zeros from the nul_inotify_event.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's from nul_inotify_event */
		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
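
/*
 * Illustrative sketch (not part of this file): the record layout produced
 * above, a fixed struct inotify_event followed by ->len bytes of NUL-padded
 * name, is what a userspace consumer walks after read().  A minimal
 * (hypothetical) consumer loop looks roughly like this:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/inotify.h>
 *
 *	void drain(int inotify_fd)
 *	{
 *		char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
 *		ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *		char *p = buf;
 *
 *		while (len > 0 && p < buf + len) {
 *			struct inotify_event *ev = (struct inotify_event *)p;
 *
 *			printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *			       ev->len ? ev->name : "");
 *			p += sizeof(*ev) + ev->len;
 *		}
 *	}
 */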

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			send_len += roundup(event->name_len,
					    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
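
/*
 * Illustrative sketch (not part of this file): the FIONREAD handling above
 * lets a consumer size its read buffer to the number of bytes currently
 * queued, roughly like this (hypothetical snippet, error handling trimmed):
 *
 *	#include <sys/ioctl.h>
 *
 *	int queued_bytes(int inotify_fd)
 *	{
 *		int avail = 0;
 *
 *		if (ioctl(inotify_fd, FIONREAD, &avail) < 0)
 *			return -1;
 *		return avail;
 *	}
 */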

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

/*
 * When, for whatever reason, inotify is done with a mark (or what used to be a
 * watch) we need to remove that watch from the idr and we need to send IN_IGNORED
 * for the given wd.
 *
 * There is a bit of recursion here.  The loop looks like:
 *	inotify_destroy_mark_entry -> fsnotify_destroy_mark_by_entry ->
 *	inotify_freeing_mark -> inotify_destroy_mark_entry -> restart
 * But the loop is broken in 2 places.  fsnotify_destroy_mark_by_entry sets
 * entry->group = NULL before the call to inotify_freeing_mark, so the if (egroup)
 * test below will not call back to fsnotify again.  But even if that test wasn't
 * there this would still be safe since fsnotify_destroy_mark_by_entry() is
 * safe from recursion.
 */
void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	struct fsnotify_group *egroup;
	struct idr *idr;

	spin_lock(&entry->lock);
	egroup = entry->group;

	/* if egroup we aren't really done and something might still send events
	 * for this inode, on the callback we'll send the IN_IGNORED */
	if (egroup) {
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark_by_entry(entry);
		return;
	}
	spin_unlock(&entry->lock);

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);

	/* did the private data get added? */
	if (list_empty(&fsn_event_priv->event_list))
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* remove this entry from the idr */
	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	idr_remove(idr, ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);

	/* removed from idr, drop that reference */
	fsnotify_put_mark(entry);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	struct fsnotify_mark_entry *entry = NULL;
	struct inotify_inode_mark_entry *ientry;
	int ret = 0;
	int add = (arg & IN_MASK_ADD);
	__u32 mask;
	__u32 old_mask, new_mask;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!ientry))
		return -ENOMEM;
	/* we set the mask at the end after attaching it */
	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
	ientry->wd = 0;

find_entry:
	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (entry) {
		kmem_cache_free(inotify_inode_mark_cachep, ientry);
		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	} else {
		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
			ret = -ENOSPC;
			goto out_err;
		}

		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
		if (ret == -EEXIST)
			goto find_entry;
		else if (ret)
			goto out_err;

		entry = &ientry->fsn_entry;
retry:
		ret = -ENOMEM;
		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
			goto out_err;

		spin_lock(&group->inotify_data.idr_lock);
		/* if entry is added to the idr we keep the reference obtained
		 * through fsnotify_mark_add.  remember to drop this reference
		 * when entry is removed from idr */
		ret = idr_get_new_above(&group->inotify_data.idr, entry,
					++group->inotify_data.last_wd,
					&ientry->wd);
		spin_unlock(&group->inotify_data.idr_lock);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			goto out_err;
		}
		atomic_inc(&group->inotify_data.user->inotify_watches);
	}

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	return ientry->wd;

out_err:
	/* see this isn't supposed to happen, just kill the watch */
	if (entry) {
		fsnotify_destroy_mark_by_entry(entry);
		fsnotify_put_mark(entry);
	}
	return ret;
}
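
/*
 * Aside (not part of this file): the wd allocation above follows the classic
 * pre-idr_alloc() idiom - preallocate outside the spinlock with idr_pre_get(),
 * allocate the id under the lock, and retry when idr_get_new_above() returns
 * -EAGAIN because the preallocation was consumed.  A generic sketch of that
 * idiom, with hypothetical names:
 *
 *	int alloc_id(struct idr *idr, spinlock_t *lock, void *ptr,
 *		     int starting_id, int *id)
 *	{
 *		int err;
 *
 *		do {
 *			if (!idr_pre_get(idr, GFP_KERNEL))
 *				return -ENOMEM;
 *			spin_lock(lock);
 *			err = idr_get_new_above(idr, ptr, starting_id, id);
 *			spin_unlock(lock);
 *		} while (err == -EAGAIN);
 *
 *		return err;
 *	}
 */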

static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}

/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
		     inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	inotify_destroy_mark_entry(entry, group);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}
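
/*
 * Illustrative sketch (not part of this file): the three syscalls defined
 * above are what the glibc wrappers inotify_init1(), inotify_add_watch() and
 * inotify_rm_watch() invoke.  A minimal (hypothetical) userspace user of the
 * whole interface, error handling mostly trimmed; the read() blocks until
 * events arrive:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/inotify.h>
 *
 *	int watch_dir(const char *dir)
 *	{
 *		char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
 *		int fd, wd;
 *		ssize_t len;
 *
 *		fd = inotify_init1(IN_CLOEXEC);
 *		if (fd < 0)
 *			return -1;
 *
 *		wd = inotify_add_watch(fd, dir, IN_CREATE | IN_DELETE | IN_MODIFY);
 *		if (wd < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		len = read(fd, buf, sizeof(buf));
 *		if (len > 0)
 *			printf("got %zd bytes of events for %s\n", len, dir);
 *
 *		inotify_rm_watch(fd, wd);
 *		close(fd);
 *		return 0;
 *	}
 */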

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
	if (!inotify_ignored_event)
		panic("unable to allocate the inotify ignored event\n");

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);