@@ -106,20 +106,29 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
         return client_fd;
 }
 
-static ssize_t fill_event_metadata(struct fsnotify_group *group,
+static int fill_event_metadata(struct fsnotify_group *group,
                                    struct fanotify_event_metadata *metadata,
                                    struct fsnotify_event *event)
 {
+        int ret = 0;
+
         pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                  group, metadata, event);
 
         metadata->event_len = FAN_EVENT_METADATA_LEN;
+        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
         metadata->vers = FANOTIFY_METADATA_VERSION;
         metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
         metadata->pid = pid_vnr(event->tgid);
-        metadata->fd = create_fd(group, event);
+        if (unlikely(event->mask & FAN_Q_OVERFLOW))
+                metadata->fd = FAN_NOFD;
+        else {
+                metadata->fd = create_fd(group, event);
+                if (metadata->fd < 0)
+                        ret = metadata->fd;
+        }
 
-        return metadata->fd;
+        return ret;
 }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
@@ -200,7 +209,7 @@ static int prepare_for_access_response(struct fsnotify_group *group,
 
         mutex_lock(&group->fanotify_data.access_mutex);
 
-        if (group->fanotify_data.bypass_perm) {
+        if (atomic_read(&group->fanotify_data.bypass_perm)) {
                 mutex_unlock(&group->fanotify_data.access_mutex);
                 kmem_cache_free(fanotify_response_event_cache, re);
                 event->response = FAN_ALLOW;
@@ -257,24 +266,34 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 
         pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-        fd = fill_event_metadata(group, &fanotify_event_metadata, event);
-        if (fd < 0)
-                return fd;
+        ret = fill_event_metadata(group, &fanotify_event_metadata, event);
+        if (ret < 0)
+                goto out;
 
+        fd = fanotify_event_metadata.fd;
         ret = prepare_for_access_response(group, event, fd);
         if (ret)
                 goto out_close_fd;
 
         ret = -EFAULT;
-        if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
+        if (copy_to_user(buf, &fanotify_event_metadata,
+                         fanotify_event_metadata.event_len))
                 goto out_kill_access_response;
 
-        return FAN_EVENT_METADATA_LEN;
+        return fanotify_event_metadata.event_len;
 
 out_kill_access_response:
         remove_access_response(group, event, fd);
 out_close_fd:
-        sys_close(fd);
+        if (fd != FAN_NOFD)
+                sys_close(fd);
+out:
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+        if (event->mask & FAN_ALL_PERM_EVENTS) {
+                event->response = FAN_DENY;
+                wake_up(&group->fanotify_data.access_waitq);
+        }
+#endif
         return ret;
 }
 
@@ -382,7 +401,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
         mutex_lock(&group->fanotify_data.access_mutex);
 
-        group->fanotify_data.bypass_perm = true;
+        atomic_inc(&group->fanotify_data.bypass_perm);
 
         list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
                 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
@@ -586,11 +605,10 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
 {
         struct fsnotify_mark *fsn_mark;
         __u32 added;
+        int ret = 0;
 
         fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
         if (!fsn_mark) {
-                int ret;
-
                 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                         return -ENOSPC;
 
@@ -600,17 +618,16 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
 
                 fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
-                if (ret) {
-                        fanotify_free_mark(fsn_mark);
-                        return ret;
-                }
+                if (ret)
+                        goto err;
         }
         added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
-        fsnotify_put_mark(fsn_mark);
+
         if (added & ~mnt->mnt_fsnotify_mask)
                 fsnotify_recalc_vfsmount_mask(mnt);
-
-        return 0;
+err:
+        fsnotify_put_mark(fsn_mark);
+        return ret;
 }
 
 static int fanotify_add_inode_mark(struct fsnotify_group *group,
@@ -619,6 +636,7 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 {
         struct fsnotify_mark *fsn_mark;
         __u32 added;
+        int ret = 0;
 
         pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
 
@@ -634,8 +652,6 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 
         fsn_mark = fsnotify_find_inode_mark(group, inode);
         if (!fsn_mark) {
-                int ret;
-
                 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                         return -ENOSPC;
 
@@ -645,16 +661,16 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 
                 fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
-                if (ret) {
-                        fanotify_free_mark(fsn_mark);
-                        return ret;
-                }
+                if (ret)
+                        goto err;
         }
         added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
-        fsnotify_put_mark(fsn_mark);
+
         if (added & ~inode->i_fsnotify_mask)
                 fsnotify_recalc_inode_mask(inode);
-        return 0;
+err:
+        fsnotify_put_mark(fsn_mark);
+        return ret;
 }
 
 /* fanotify syscalls */
@@ -687,8 +703,10 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 
         /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
         group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
-        if (IS_ERR(group))
+        if (IS_ERR(group)) {
+                free_uid(user);
                 return PTR_ERR(group);
+        }
 
         group->fanotify_data.user = user;
         atomic_inc(&user->fanotify_listeners);
@@ -698,6 +716,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
         mutex_init(&group->fanotify_data.access_mutex);
         init_waitqueue_head(&group->fanotify_data.access_waitq);
         INIT_LIST_HEAD(&group->fanotify_data.access_list);
+        atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
         switch (flags & FAN_ALL_CLASS_BITS) {
         case FAN_CLASS_NOTIF:
@@ -764,8 +783,10 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
         if (flags & ~FAN_ALL_MARK_FLAGS)
                 return -EINVAL;
         switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
-        case FAN_MARK_ADD:
+        case FAN_MARK_ADD:              /* fallthrough */
         case FAN_MARK_REMOVE:
+                if (!mask)
+                        return -EINVAL;
         case FAN_MARK_FLUSH:
                 break;
         default: