fanotify_user.c

#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

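/*
 * A permission event that has been handed to userspace and is waiting for
 * a verdict, keyed by the file descriptor we installed for the listener.
 */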
struct fanotify_response_event {
        struct list_head list;
        __s32 fd;
        struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count".  Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        if (FAN_EVENT_METADATA_LEN > count)
                return ERR_PTR(-EINVAL);

        /* held the notification_mutex the whole time, so this is the
         * same event we peeked above */
        return fsnotify_remove_notify_event(group);
}

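/*
 * Open a new struct file against the path carried by the event and install
 * it in an unused fd for the listener.  Returns the fd on success or a
 * negative errno, which the read path reports back to userspace.
 */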
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
        int client_fd;
        struct dentry *dentry;
        struct vfsmount *mnt;
        struct file *new_file;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        client_fd = get_unused_fd();
        if (client_fd < 0)
                return client_fd;

        if (event->data_type != FSNOTIFY_EVENT_PATH) {
                WARN_ON(1);
                put_unused_fd(client_fd);
                return -EINVAL;
        }

        /*
         * We need a new file handle for the userspace program so it can
         * read even if the file was originally opened O_WRONLY.
         */
        dentry = dget(event->path.dentry);
        mnt = mntget(event->path.mnt);
        /* it's possible this event was an overflow event.  in that case
         * dentry and mnt are NULL; that's fine, just don't call dentry_open() */
        if (dentry && mnt)
                new_file = dentry_open(dentry, mnt,
                                       group->fanotify_data.f_flags | FMODE_NONOTIFY,
                                       current_cred());
        else
                new_file = ERR_PTR(-EOVERFLOW);
        if (IS_ERR(new_file)) {
                /*
                 * We still send an event even if we can't open the file.
                 * This can happen when, say, tasks are gone and we try to
                 * open their /proc files, or when we try to open a WRONLY
                 * file like in sysfs.  We just send the errno to userspace
                 * since there isn't much else we can do.
                 */
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
                fd_install(client_fd, new_file);
        }

        return client_fd;
}

static ssize_t fill_event_metadata(struct fsnotify_group *group,
                                   struct fanotify_event_metadata *metadata,
                                   struct fsnotify_event *event)
{
        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, event);

        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
        metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
        metadata->pid = pid_vnr(event->tgid);
        metadata->fd = create_fd(group, event);

        return metadata->fd;
}

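/*
 * With CONFIG_FANOTIFY_ACCESS_PERMISSIONS, a reported permission event sits
 * on the group's access_list until userspace writes back a struct
 * fanotify_response for the matching fd (see fanotify_write()).
 */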
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
                                                  __s32 fd)
{
        struct fanotify_response_event *re, *return_re = NULL;

        mutex_lock(&group->fanotify_data.access_mutex);
        list_for_each_entry(re, &group->fanotify_data.access_list, list) {
                if (re->fd != fd)
                        continue;

                list_del_init(&re->list);
                return_re = re;
                break;
        }
        mutex_unlock(&group->fanotify_data.access_mutex);

        pr_debug("%s: found return_re=%p\n", __func__, return_re);

        return return_re;
}

static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
{
        struct fanotify_response_event *re;
        __s32 fd = response_struct->fd;
        __u32 response = response_struct->response;

        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        /*
         * make sure the response is valid.  if invalid we do nothing and
         * either userspace can send a valid response or we will clean it
         * up after the timeout
         */
        switch (response) {
        case FAN_ALLOW:
        case FAN_DENY:
                break;
        default:
                return -EINVAL;
        }

        if (fd < 0)
                return -EINVAL;

        re = dequeue_re(group, fd);
        if (!re)
                return -ENOENT;

        re->event->response = response;

        wake_up(&group->fanotify_data.access_waitq);

        kmem_cache_free(fanotify_response_event_cache, re);

        return 0;
}

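/*
 * Queue a response event for a permission event before its metadata is
 * copied to userspace.  If the group is already bypassing permission checks
 * because it is being torn down, auto-allow instead of queueing.
 */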
static int prepare_for_access_response(struct fsnotify_group *group,
                                       struct fsnotify_event *event,
                                       __s32 fd)
{
        struct fanotify_response_event *re;

        if (!(event->mask & FAN_ALL_PERM_EVENTS))
                return 0;

        re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
        if (!re)
                return -ENOMEM;

        re->event = event;
        re->fd = fd;

        mutex_lock(&group->fanotify_data.access_mutex);

        if (group->fanotify_data.bypass_perm) {
                mutex_unlock(&group->fanotify_data.access_mutex);
                kmem_cache_free(fanotify_response_event_cache, re);
                event->response = FAN_ALLOW;
                return 0;
        }

        list_add_tail(&re->list, &group->fanotify_data.access_list);
        mutex_unlock(&group->fanotify_data.access_mutex);

        return 0;
}

static void remove_access_response(struct fsnotify_group *group,
                                   struct fsnotify_event *event,
                                   __s32 fd)
{
        struct fanotify_response_event *re;

        if (!(event->mask & FAN_ALL_PERM_EVENTS))
                return;

        re = dequeue_re(group, fd);
        if (!re)
                return;

        BUG_ON(re->event != event);

        kmem_cache_free(fanotify_response_event_cache, re);

        return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
                                       struct fsnotify_event *event,
                                       __s32 fd)
{
        return 0;
}

static void remove_access_response(struct fsnotify_group *group,
                                   struct fsnotify_event *event,
                                   __s32 fd)
{
        return;
}
#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct fanotify_event_metadata fanotify_event_metadata;
        int fd, ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        fd = fill_event_metadata(group, &fanotify_event_metadata, event);
        if (fd < 0)
                return fd;

        ret = prepare_for_access_response(group, event, fd);
        if (ret)
                goto out_close_fd;

        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
                goto out_kill_access_response;

        return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
        remove_access_response(group, event, fd);
out_close_fd:
        sys_close(fd);
        return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        int ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        mutex_lock(&group->notification_mutex);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = POLLIN | POLLRDNORM;
        mutex_unlock(&group->notification_mutex);

        return ret;
}

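/*
 * Drain as many queued events as fit in the caller's buffer; each event is
 * reported as a struct fanotify_event_metadata carrying a freshly opened fd
 * for the object the event occurred on.  Blocks until an event arrives
 * unless O_NONBLOCK is set or something was already copied out.
 */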
static ssize_t fanotify_read(struct file *file, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        group = file->private_data;

        pr_debug("%s: group=%p\n", __func__, group);

        while (1) {
                prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);

                if (kevent) {
                        ret = PTR_ERR(kevent);
                        if (IS_ERR(kevent))
                                break;
                        ret = copy_event_to_user(group, kevent, buf);
                        fsnotify_put_event(kevent);
                        if (ret < 0)
                                break;
                        buf += ret;
                        count -= ret;
                        continue;
                }

                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
                        break;
                ret = -EINTR;
                if (signal_pending(current))
                        break;

                if (start != buf)
                        break;

                schedule();
        }

        finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}

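/*
 * Writes to a fanotify fd are access responses: userspace answers a pending
 * permission event by writing a struct fanotify_response naming the event's
 * fd and a verdict of FAN_ALLOW or FAN_DENY.
 */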
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_response response = { .fd = -1, .response = -1 };
        struct fsnotify_group *group;
        int ret;

        group = file->private_data;

        if (count > sizeof(response))
                count = sizeof(response);

        pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

        if (copy_from_user(&response, buf, count))
                return -EFAULT;

        ret = process_access_response(group, &response);
        if (ret < 0)
                count = ret;

        return count;
#else
        return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;
        struct fanotify_response_event *re, *lre;

        pr_debug("%s: file=%p group=%p\n", __func__, file, group);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        mutex_lock(&group->fanotify_data.access_mutex);

        group->fanotify_data.bypass_perm = true;

        list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
                pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
                         re, re->event);

                list_del_init(&re->list);
                re->event->response = FAN_ALLOW;

                kmem_cache_free(fanotify_response_event_cache, re);
        }
        mutex_unlock(&group->fanotify_data.access_mutex);

        wake_up(&group->fanotify_data.access_waitq);
#endif
        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_put_group(group);

        return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event_holder *holder;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;

        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                mutex_lock(&group->notification_mutex);
                list_for_each_entry(holder, &group->notification_list, event_list)
                        send_len += FAN_EVENT_METADATA_LEN;
                mutex_unlock(&group->notification_mutex);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations fanotify_fops = {
        .poll           = fanotify_poll,
        .read           = fanotify_read,
        .write          = fanotify_write,
        .fasync         = NULL,
        .release        = fanotify_release,
        .unlocked_ioctl = fanotify_ioctl,
        .compat_ioctl   = fanotify_ioctl,
        .llseek         = noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
                              struct path *path, unsigned int flags)
{
        int ret;

        pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
                 dfd, filename, flags);

        if (filename == NULL) {
                struct file *file;
                int fput_needed;

                ret = -EBADF;
                file = fget_light(dfd, &fput_needed);
                if (!file)
                        goto out;

                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
                    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
                        fput_light(file, fput_needed);
                        goto out;
                }

                *path = file->f_path;
                path_get(path);
                fput_light(file, fput_needed);
        } else {
                unsigned int lookup_flags = 0;

                if (!(flags & FAN_MARK_DONT_FOLLOW))
                        lookup_flags |= LOOKUP_FOLLOW;
                if (flags & FAN_MARK_ONLYDIR)
                        lookup_flags |= LOOKUP_DIRECTORY;

                ret = user_path_at(dfd, filename, lookup_flags, path);
                if (ret)
                        goto out;
        }

        /* you can only watch an inode if you have read permissions on it */
        ret = inode_permission(path->dentry->d_inode, MAY_READ);
        if (ret)
                path_put(path);
out:
        return ret;
}

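/*
 * Clear bits in a mark's event mask or ignored mask and destroy the mark
 * once the modified mask would be left empty.  Returns the bits that were
 * actually cleared so the caller knows whether the object's combined mask
 * needs recalculating.
 */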
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
                                            __u32 mask,
                                            unsigned int flags)
{
        __u32 oldmask;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
        } else {
                oldmask = fsn_mark->ignored_mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
        }
        spin_unlock(&fsn_mark->lock);

        if (!(oldmask & ~mask))
                fsnotify_destroy_mark(fsn_mark);

        return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
                                         struct vfsmount *mnt, __u32 mask,
                                         unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;

        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark)
                return -ENOENT;

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
        fsnotify_put_mark(fsn_mark);
        if (removed & mnt->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
                                      struct inode *inode, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;

        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark)
                return -ENOENT;

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
        /* matches the fsnotify_find_inode_mark() */
        fsnotify_put_mark(fsn_mark);
        if (removed & inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        return 0;
}

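/*
 * Set bits in a mark's event mask or ignored mask.  Returns the bits that
 * were newly set (mask & ~oldmask); callers only recalculate the inode's
 * or mount's combined mask when something actually changed.
 */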
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
                                       __u32 mask,
                                       unsigned int flags)
{
        __u32 oldmask;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
        } else {
                oldmask = fsn_mark->ignored_mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
                if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
                        fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
        }
        spin_unlock(&fsn_mark->lock);

        return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
                                      struct vfsmount *mnt, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                int ret;

                fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
                if (!fsn_mark)
                        return -ENOMEM;

                fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
                if (ret) {
                        fanotify_free_mark(fsn_mark);
                        return ret;
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        fsnotify_put_mark(fsn_mark);
        if (added & ~mnt->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
                                   struct inode *inode, __u32 mask,
                                   unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

        /*
         * If some other task has this inode open for write we should not add
         * an ignored mark, unless that ignored mark is supposed to survive
         * modification changes anyway.
         */
        if ((flags & FAN_MARK_IGNORED_MASK) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            (atomic_read(&inode->i_writecount) > 0))
                return 0;

        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                int ret;

                fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
                if (!fsn_mark)
                        return -ENOMEM;

                fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
                if (ret) {
                        fanotify_free_mark(fsn_mark);
                        return ret;
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        fsnotify_put_mark(fsn_mark);
        if (added & ~inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        return 0;
}

/* fanotify syscalls */
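/*
 * Illustrative userspace usage (a sketch, not part of this file): assuming
 * the libc syscall wrappers, a listener that watches a whole mount might
 * look roughly like
 *
 *      int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_CLOEXEC, O_RDONLY);
 *      fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *                    FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *
 * and then read() struct fanotify_event_metadata records from fd.
 */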
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
        struct fsnotify_group *group;
        int f_flags, fd;

        pr_debug("%s: flags=%d event_f_flags=%d\n",
                 __func__, flags, event_f_flags);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (flags & ~FAN_ALL_INIT_FLAGS)
                return -EINVAL;

        f_flags = O_RDWR | FMODE_NONOTIFY;
        if (flags & FAN_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (flags & FAN_NONBLOCK)
                f_flags |= O_NONBLOCK;

        /* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
        group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
        if (IS_ERR(group))
                return PTR_ERR(group);

        group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        mutex_init(&group->fanotify_data.access_mutex);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
                group->priority = FS_PRIO_0;
                break;
        case FAN_CLASS_CONTENT:
                group->priority = FS_PRIO_1;
                break;
        case FAN_CLASS_PRE_CONTENT:
                group->priority = FS_PRIO_2;
                break;
        default:
                fd = -EINVAL;
                goto out_put_group;
        }

        group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;

        fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
        if (fd < 0)
                goto out_put_group;

        return fd;

out_put_group:
        fsnotify_put_group(group);
        return fd;
}

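/*
 * Add, remove, or flush marks on the inode or mount named by dfd/pathname
 * after validating the mask and flags against the group's notification
 * class.
 */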
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
                              __u64 mask, int dfd,
                              const char __user *pathname)
{
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
        struct file *filp;
        struct path path;
        int ret, fput_needed;

        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);

        /* we only use the lower 32 bits as of right now. */
        if (mask & ((__u64)0xffffffff << 32))
                return -EINVAL;

        if (flags & ~FAN_ALL_MARK_FLAGS)
                return -EINVAL;
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:
        case FAN_MARK_REMOVE:
        case FAN_MARK_FLUSH:
                break;
        default:
                return -EINVAL;
        }
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
        if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
                return -EINVAL;

        filp = fget_light(fanotify_fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
        if (unlikely(filp->f_op != &fanotify_fops))
                goto fput_and_out;
        group = filp->private_data;

        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
         * allowed to set permissions events.
         */
        ret = -EINVAL;
        if (mask & FAN_ALL_PERM_EVENTS &&
            group->priority == FS_PRIO_0)
                goto fput_and_out;

        ret = fanotify_find_path(dfd, pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        if (!(flags & FAN_MARK_MOUNT))
                inode = path.dentry->d_inode;
        else
                mnt = path.mnt;

        /* create/update an inode mark */
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_add_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_REMOVE:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_remove_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_FLUSH:
                if (flags & FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
                else
                        fsnotify_clear_inode_marks_by_group(group);
                break;
        default:
                ret = -EINVAL;
        }

        path_put(&path);
fput_and_out:
        fput_light(filp, fput_needed);
        return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
                                  long dfd, long pathname)
{
        return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
                                  mask, (int) dfd,
                                  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot
 * return error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
        fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
                                                   SLAB_PANIC);

        return 0;
}
device_initcall(fanotify_user_setup);