/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
        {
                .procname = "max_user_instances",
                .data = &inotify_max_user_instances,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
        },
        {
                .procname = "max_user_watches",
                .data = &inotify_max_user_watches,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
        },
        {
                .procname = "max_queued_events",
                .data = &inotify_max_queued_events,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero
        },
        { }
};
#endif /* CONFIG_SYSCTL */
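
/*
 * These knobs appear as /proc/sys/fs/inotify/max_user_instances,
 * /proc/sys/fs/inotify/max_user_watches and
 * /proc/sys/fs/inotify/max_queued_events.  Their boot-time defaults
 * (128, 8192 and 16384 respectively) are assigned in
 * inotify_user_setup() at the bottom of this file.
 */
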
static inline __u32 inotify_arg_to_mask(u32 arg)
{
        __u32 mask;

        /* every watch should accept its own ignored event and care about children */
        mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

        /* mask off the flags used to open the fd */
        mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

        return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
        return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
                       IN_Q_OVERFLOW);
}
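
/*
 * Note that inotify_arg_to_mask() ORs IN_* bits straight into an
 * fsnotify mask: the conversion relies on the IN_* event values and
 * their FS_* counterparts in fsnotify_backend.h being numerically
 * identical, so e.g. inotify_arg_to_mask(IN_MODIFY) yields FS_MODIFY
 * plus the always-on FS_IN_IGNORED | FS_EVENT_ON_CHILD bits.
 */
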
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        int ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        mutex_lock(&group->notification_mutex);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = POLLIN | POLLRDNORM;
        mutex_unlock(&group->notification_mutex);

        return ret;
}

/*
 * Get an fsnotify event if one exists and is small enough to fit
 * in "count".  Return an error pointer if "count" is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        size_t event_size = sizeof(struct inotify_event);
        struct fsnotify_event *event;

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        event = fsnotify_peek_notify_event(group);

        if (event->name_len)
                event_size += roundup(event->name_len + 1, event_size);

        if (event_size > count)
                return ERR_PTR(-EINVAL);

        /* held the notification_mutex the whole time, so this is the
         * same event we peeked above */
        fsnotify_remove_notify_event(group);

        return event;
}
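
/*
 * The -EINVAL above is what a userspace read() with too small a buffer
 * sees.  Since a queued name can be up to NAME_MAX bytes, a reader that
 * wants to be able to consume any single event should size its buffer
 * at least as large as (userspace-side sizing advice, not something
 * this file enforces):
 *
 *	char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
 */
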

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct inotify_event inotify_event;
        struct fsnotify_event_private_data *fsn_priv;
        struct inotify_event_private_data *priv;
        size_t event_size = sizeof(struct inotify_event);
        size_t name_len = 0;

        /* we get the inotify watch descriptor from the event private data */
        spin_lock(&event->lock);
        fsn_priv = fsnotify_remove_priv_from_event(group, event);
        spin_unlock(&event->lock);

        if (!fsn_priv)
                inotify_event.wd = -1;
        else {
                priv = container_of(fsn_priv, struct inotify_event_private_data,
                                    fsnotify_event_priv_data);
                inotify_event.wd = priv->wd;
                inotify_free_event_priv(fsn_priv);
        }

        /*
         * round up event->name_len + 1 (the extra byte is for the
         * terminating '\0') so it is a multiple of event_size.
         */
        if (event->name_len)
                name_len = roundup(event->name_len + 1, event_size);
        inotify_event.len = name_len;

        inotify_event.mask = inotify_mask_to_arg(event->mask);
        inotify_event.cookie = event->sync_cookie;

        /* send the main event */
        if (copy_to_user(buf, &inotify_event, event_size))
                return -EFAULT;

        buf += event_size;

        /*
         * fsnotify only stores the pathname, so here we have to send the
         * pathname and then pad that pathname out to a multiple of
         * sizeof(inotify_event) with zeros, via clear_user() below.
         */
        if (name_len) {
                unsigned int len_to_zero = name_len - event->name_len;
                /* copy the path name */
                if (copy_to_user(buf, event->file_name, event->name_len))
                        return -EFAULT;
                buf += event->name_len;

                /* fill userspace with 0's */
                if (clear_user(buf, len_to_zero))
                        return -EFAULT;
                buf += len_to_zero;
                event_size += name_len;
        }

        return event_size;
}
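
/*
 * The resulting byte stream seen by a reader is a sequence of
 * variable-length records, each laid out as the userspace
 * struct inotify_event from <linux/inotify.h>:
 *
 *	struct inotify_event {
 *		__s32 wd;	watch descriptor
 *		__u32 mask;	event bits, as filtered by inotify_mask_to_arg()
 *		__u32 cookie;	pairs IN_MOVED_FROM with IN_MOVED_TO
 *		__u32 len;	padded length of name[], 0 if absent
 *		char  name[0];	'\0'-padded filename, present only if len != 0
 *	};
 *
 * so the next record always starts sizeof(struct inotify_event) + len
 * bytes further on.
 */
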
static ssize_t inotify_read(struct file *file, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        group = file->private_data;

        while (1) {
                prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);

                if (kevent) {
                        ret = PTR_ERR(kevent);
                        if (IS_ERR(kevent))
                                break;
                        ret = copy_event_to_user(group, kevent, buf);
                        fsnotify_put_event(kevent);
                        if (ret < 0)
                                break;
                        buf += ret;
                        count -= ret;
                        continue;
                }

                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
                        break;
                ret = -EINTR;
                if (signal_pending(current))
                        break;

                if (start != buf)
                        break;

                schedule();
        }

        finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}
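
/*
 * A minimal userspace sketch of the read-side protocol implemented by
 * inotify_read() above, for illustration only.  It is not part of this
 * file; it assumes the usual glibc <sys/inotify.h> wrappers and pares
 * error handling down to the bone:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/inotify.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096]
 *			__attribute__((aligned(__alignof__(struct inotify_event))));
 *		ssize_t len;
 *		int fd = inotify_init1(IN_CLOEXEC);
 *		int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *
 *		if (fd < 0 || wd < 0)
 *			return 1;
 *		while ((len = read(fd, buf, sizeof(buf))) > 0) {
 *			char *p = buf;
 *			while (p < buf + len) {
 *				struct inotify_event *ev = (struct inotify_event *)p;
 *				printf("wd=%d mask=%#x name=%s\n", ev->wd,
 *				       ev->mask, ev->len ? ev->name : "");
 *				p += sizeof(*ev) + ev->len;
 *			}
 *		}
 *		return 0;
 *	}
 */
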
static int inotify_fasync(int fd, struct file *file, int on)
{
        struct fsnotify_group *group = file->private_data;

        return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;
        struct user_struct *user = group->inotify_data.user;

        fsnotify_clear_marks_by_group(group);

        /* free this group, matching get was inotify_init->fsnotify_obtain_group */
        fsnotify_put_group(group);

        atomic_dec(&user->inotify_devs);

        return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event_holder *holder;
        struct fsnotify_event *event;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;
        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                mutex_lock(&group->notification_mutex);
                list_for_each_entry(holder, &group->notification_list, event_list) {
                        event = holder->event;
                        send_len += sizeof(struct inotify_event);
                        if (event->name_len)
                                send_len += roundup(event->name_len + 1,
                                                    sizeof(struct inotify_event));
                }
                mutex_unlock(&group->notification_mutex);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}
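
/*
 * FIONREAD is the only command handled: it reports how many bytes the
 * queued events would occupy in the read() format described earlier,
 * so a caller can size a buffer to drain the whole queue in one read.
 * Userspace illustration (any other cmd gets -ENOTTY):
 *
 *	int avail;
 *	ioctl(fd, FIONREAD, &avail);
 */
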
static const struct file_operations inotify_fops = {
        .poll = inotify_poll,
        .read = inotify_read,
        .fasync = inotify_fasync,
        .release = inotify_release,
        .unlocked_ioctl = inotify_ioctl,
        .compat_ioctl = inotify_ioctl,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
        int error;

        error = user_path_at(AT_FDCWD, dirname, flags, path);
        if (error)
                return error;
        /* you can only watch an inode if you have read permissions on it */
        error = inode_permission(path->dentry->d_inode, MAY_READ);
        if (error)
                path_put(path);
        return error;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
                                    struct inotify_inode_mark_entry *ientry)
{
        struct idr *idr;
        struct fsnotify_mark_entry *entry;
        struct inotify_inode_mark_entry *found_ientry;
        int wd;

        spin_lock(&group->inotify_data.idr_lock);
        idr = &group->inotify_data.idr;
        wd = ientry->wd;

        if (wd == -1)
                goto out;

        entry = idr_find(&group->inotify_data.idr, wd);
        if (unlikely(!entry))
                goto out;

        found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
        if (unlikely(found_ientry != ientry)) {
                /* We found an entry in the idr with the right wd, but it's
                 * not the entry we were told to remove.  eparis seriously
                 * fucked up somewhere. */
                WARN_ON(1);
                ientry->wd = -1;
                goto out;
        }

        /* One ref for being in the idr, one ref held by the caller */
        BUG_ON(atomic_read(&entry->refcnt) < 2);

        idr_remove(idr, wd);
        ientry->wd = -1;

        /* removed from the idr, drop that ref */
        fsnotify_put_mark(entry);
out:
        spin_unlock(&group->inotify_data.idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
                                    struct fsnotify_group *group)
{
        struct inotify_inode_mark_entry *ientry;
        struct fsnotify_event *ignored_event;
        struct inotify_event_private_data *event_priv;
        struct fsnotify_event_private_data *fsn_event_priv;
        int ret;

        ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
                                              FSNOTIFY_EVENT_NONE, NULL, 0,
                                              GFP_NOFS);
        if (!ignored_event)
                return;

        ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

        event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
        if (unlikely(!event_priv))
                goto skip_send_ignore;

        fsn_event_priv = &event_priv->fsnotify_event_priv_data;

        fsn_event_priv->group = group;
        event_priv->wd = ientry->wd;

        ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
        if (ret)
                inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

        /* matches the reference taken when the event was created */
        fsnotify_put_event(ignored_event);

        /* remove this entry from the idr */
        inotify_remove_from_idr(group, ientry);

        atomic_dec(&group->inotify_data.user->inotify_watches);
}
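
/*
 * This runs whenever a watch dies, whatever the cause: an explicit
 * inotify_rm_watch(), an IN_ONESHOT watch firing, or the watched inode
 * going away.  The queued FS_IN_IGNORED event is what userspace later
 * reads back as IN_IGNORED, its signal that the wd is no longer valid.
 * The GFP_NOFS allocations are presumably because this can be reached
 * from filesystem teardown paths.
 */
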
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
        struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

        kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
                                         struct inode *inode,
                                         u32 arg)
{
        struct fsnotify_mark_entry *entry;
        struct inotify_inode_mark_entry *ientry;
        __u32 old_mask, new_mask;
        __u32 mask;
        int add = (arg & IN_MASK_ADD);
        int ret;

        /* don't allow invalid bits: we don't want flags set */
        mask = inotify_arg_to_mask(arg);
        if (unlikely(!mask))
                return -EINVAL;

        spin_lock(&inode->i_lock);
        entry = fsnotify_find_mark_entry(group, inode);
        spin_unlock(&inode->i_lock);
        if (!entry)
                return -ENOENT;

        ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

        spin_lock(&entry->lock);

        old_mask = entry->mask;
        if (add) {
                entry->mask |= mask;
                new_mask = entry->mask;
        } else {
                entry->mask = mask;
                new_mask = entry->mask;
        }

        spin_unlock(&entry->lock);

        if (old_mask != new_mask) {
                /* more bits in old than in new? */
                int dropped = (old_mask & ~new_mask);
                /* more bits in this entry than the inode's mask? */
                int do_inode = (new_mask & ~inode->i_fsnotify_mask);
                /* more bits in this entry than the group? */
                int do_group = (new_mask & ~group->mask);

                /* update the inode with this new entry */
                if (dropped || do_inode)
                        fsnotify_recalc_inode_mask(inode);

                /* update the group mask with the new mask */
                if (dropped || do_group)
                        fsnotify_recalc_group_mask(group);
        }

        /* return the wd */
        ret = ientry->wd;

        /* match the get from fsnotify_find_mark_entry() */
        fsnotify_put_mark(entry);

        return ret;
}
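
/*
 * The "add" branch above is what gives inotify_add_watch() its
 * IN_MASK_ADD semantics: without the flag a second add on the same
 * inode replaces the watch mask, with it the new bits are ORed in.
 * Userspace illustration:
 *
 *	inotify_add_watch(fd, "/tmp", IN_CREATE);
 *	inotify_add_watch(fd, "/tmp", IN_DELETE | IN_MASK_ADD);
 *
 * leaves the watch listening for both IN_CREATE and IN_DELETE, and
 * both calls return the same wd.
 */
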
static int inotify_new_watch(struct fsnotify_group *group,
                             struct inode *inode,
                             u32 arg)
{
        struct inotify_inode_mark_entry *tmp_ientry;
        __u32 mask;
        int ret;

        /* don't allow invalid bits: we don't want flags set */
        mask = inotify_arg_to_mask(arg);
        if (unlikely(!mask))
                return -EINVAL;

        tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
        if (unlikely(!tmp_ientry))
                return -ENOMEM;

        fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
        tmp_ientry->fsn_entry.mask = mask;
        tmp_ientry->wd = -1;

        ret = -ENOSPC;
        if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
                goto out_err;
retry:
        ret = -ENOMEM;
        if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
                goto out_err;

        /* we are putting the mark on the idr, take a reference */
        fsnotify_get_mark(&tmp_ientry->fsn_entry);

        spin_lock(&group->inotify_data.idr_lock);
        ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
                                group->inotify_data.last_wd+1,
                                &tmp_ientry->wd);
        spin_unlock(&group->inotify_data.idr_lock);
        if (ret) {
                /* we didn't get on the idr, drop the idr reference */
                fsnotify_put_mark(&tmp_ientry->fsn_entry);

                /* the idr ran out of preallocated memory, allocate and try again */
                if (ret == -EAGAIN)
                        goto retry;
                goto out_err;
        }

        /* we are on the idr, now get on the inode */
        ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
        if (ret) {
                /* we failed to get on the inode, get off the idr */
                inotify_remove_from_idr(group, tmp_ientry);
                goto out_err;
        }

        /* update the idr hint, who cares about races, it's just a hint */
        group->inotify_data.last_wd = tmp_ientry->wd;

        /* increment the number of watches the user has */
        atomic_inc(&group->inotify_data.user->inotify_watches);

        /* return the watch descriptor for this new entry */
        ret = tmp_ientry->wd;

        /* if this mark added a new event update the group mask */
        if (mask & ~group->mask)
                fsnotify_recalc_group_mask(group);

out_err:
        /* match the ref from fsnotify_init_mark() */
        fsnotify_put_mark(&tmp_ientry->fsn_entry);

        return ret;
}
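
/*
 * The retry loop above follows the two-step pattern of the old idr
 * API: idr_pre_get() preallocates memory without holding any lock,
 * then idr_get_new_above() does the actual insertion under idr_lock.
 * If a racing insertion consumed the preallocation first,
 * idr_get_new_above() returns -EAGAIN and we simply preallocate and
 * try again.  Starting the search at last_wd+1 keeps watch descriptors
 * from being reused immediately after removal.
 */
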
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
        int ret = 0;

retry:
        /* try to update an existing watch with the new arg */
        ret = inotify_update_existing_watch(group, inode, arg);
        /* no mark present, try to add a new one */
        if (ret == -ENOENT)
                ret = inotify_new_watch(group, inode, arg);
        /*
         * inotify_new_watch could race with another thread which did an
         * inotify_new_watch between our update_existing call and our add
         * here; go back and try to update an existing mark again.
         */
        if (ret == -EEXIST)
                goto retry;

        return ret;
}

static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
        struct fsnotify_group *group;
        unsigned int grp_num;

        /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
        grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
        group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
        if (IS_ERR(group))
                return group;

        group->max_events = max_events;

        spin_lock_init(&group->inotify_data.idr_lock);
        idr_init(&group->inotify_data.idr);
        group->inotify_data.last_wd = 0;
        group->inotify_data.user = user;
        group->inotify_data.fa = NULL;

        return group;
}

/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
        struct fsnotify_group *group;
        struct user_struct *user;
        int ret;

        /* Check the IN_* constants for consistency.  */
        BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

        if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
                return -EINVAL;

        user = get_current_user();
        if (unlikely(atomic_read(&user->inotify_devs) >=
                     inotify_max_user_instances)) {
                ret = -EMFILE;
                goto out_free_uid;
        }

        /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
        group = inotify_new_group(user, inotify_max_queued_events);
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_free_uid;
        }

        atomic_inc(&user->inotify_devs);

        ret = anon_inode_getfd("inotify", &inotify_fops, group,
                               O_RDONLY | flags);
        if (ret >= 0)
                return ret;

        atomic_dec(&user->inotify_devs);
out_free_uid:
        free_uid(user);
        return ret;
}
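
/*
 * The BUILD_BUG_ONs in inotify_init1() are what make the bare
 * "O_RDONLY | flags" above safe: IN_CLOEXEC and IN_NONBLOCK are
 * defined to the same values as O_CLOEXEC and O_NONBLOCK, so the
 * user-supplied flags can be handed to anon_inode_getfd() without
 * translation.
 */
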
SYSCALL_DEFINE0(inotify_init)
{
        return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
                u32, mask)
{
        struct fsnotify_group *group;
        struct inode *inode;
        struct path path;
        struct file *filp;
        int ret, fput_needed;
        unsigned flags = 0;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto fput_and_out;
        }

        if (!(mask & IN_DONT_FOLLOW))
                flags |= LOOKUP_FOLLOW;
        if (mask & IN_ONLYDIR)
                flags |= LOOKUP_DIRECTORY;

        ret = inotify_find_inode(pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        inode = path.dentry->d_inode;
        group = filp->private_data;

        /* create/update an inode mark */
        ret = inotify_update_watch(group, inode, mask);
        path_put(&path);
fput_and_out:
        fput_light(filp, fput_needed);
        return ret;
}
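
/*
 * Note how the two path-resolution modifiers are consumed right here
 * rather than stored in the watch mask (inotify_arg_to_mask() filters
 * them out): IN_DONT_FOLLOW suppresses LOOKUP_FOLLOW so a symlink
 * itself can be watched, and IN_ONLYDIR adds LOOKUP_DIRECTORY so the
 * call fails unless the path is a directory, e.g.
 *
 *	inotify_add_watch(fd, "/etc", IN_ONLYDIR | IN_MOVED_TO);
 */
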
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
        struct fsnotify_group *group;
        struct fsnotify_mark_entry *entry;
        struct file *filp;
        int ret = 0, fput_needed;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto out;
        }

        group = filp->private_data;

        spin_lock(&group->inotify_data.idr_lock);
        entry = idr_find(&group->inotify_data.idr, wd);
        if (unlikely(!entry)) {
                spin_unlock(&group->inotify_data.idr_lock);
                ret = -EINVAL;
                goto out;
        }
        fsnotify_get_mark(entry);
        spin_unlock(&group->inotify_data.idr_lock);

        fsnotify_destroy_mark_by_entry(entry);
        fsnotify_put_mark(entry);

out:
        fput_light(filp, fput_needed);
        return ret;
}
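
/*
 * fsnotify_destroy_mark_by_entry() ends up invoking
 * inotify_ignored_and_remove_idr() through the group's freeing_mark
 * hook, which is why a successful inotify_rm_watch() is followed by
 * one final IN_IGNORED event for the wd before it disappears from the
 * queue.
 */
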
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
        inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
        event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

        inotify_max_queued_events = 16384;
        inotify_max_user_instances = 128;
        inotify_max_user_watches = 8192;

        return 0;
}
module_init(inotify_user_setup);