@@ -2917,7 +2917,7 @@ static void free_event_rcu(struct rcu_head *head)
 	kfree(event);
 }
 
-static void ring_buffer_put(struct ring_buffer *rb);
+static bool ring_buffer_put(struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -3582,13 +3582,13 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 	return rb;
 }
 
-static void ring_buffer_put(struct ring_buffer *rb)
+static bool ring_buffer_put(struct ring_buffer *rb)
 {
 	struct perf_event *event, *n;
 	unsigned long flags;
 
 	if (!atomic_dec_and_test(&rb->refcount))
-		return;
+		return false;
 
 	spin_lock_irqsave(&rb->event_lock, flags);
 	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
@@ -3598,6 +3598,7 @@ static void ring_buffer_put(struct ring_buffer *rb)
 	spin_unlock_irqrestore(&rb->event_lock, flags);
 
 	call_rcu(&rb->rcu_head, rb_free_rcu);
+	return true;
 }
 
 static void perf_mmap_open(struct vm_area_struct *vma)
@@ -3612,18 +3613,20 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	struct perf_event *event = vma->vm_file->private_data;
 
 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->rb);
-		struct user_struct *user = event->mmap_user;
 		struct ring_buffer *rb = event->rb;
+		struct user_struct *mmap_user = rb->mmap_user;
+		int mmap_locked = rb->mmap_locked;
+		unsigned long size = perf_data_size(rb);
 
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->pinned_vm -= event->mmap_locked;
 		rcu_assign_pointer(event->rb, NULL);
 		ring_buffer_detach(event, rb);
 		mutex_unlock(&event->mmap_mutex);
 
-		ring_buffer_put(rb);
-		free_uid(user);
+		if (ring_buffer_put(rb)) {
+			atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+			vma->vm_mm->pinned_vm -= mmap_locked;
+			free_uid(mmap_user);
+		}
 	}
 }
 
@@ -3676,9 +3679,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	WARN_ON_ONCE(event->ctx->parent_ctx);
 	mutex_lock(&event->mmap_mutex);
 	if (event->rb) {
-		if (event->rb->nr_pages == nr_pages)
-			atomic_inc(&event->rb->refcount);
-		else
+		if (event->rb->nr_pages != nr_pages)
 			ret = -EINVAL;
 		goto unlock;
 	}
@@ -3720,12 +3721,14 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		ret = -ENOMEM;
 		goto unlock;
 	}
-	rcu_assign_pointer(event->rb, rb);
+
+	rb->mmap_locked = extra;
+	rb->mmap_user = get_current_user();
 
 	atomic_long_add(user_extra, &user->locked_vm);
-	event->mmap_locked = extra;
-	event->mmap_user = get_current_user();
-	vma->vm_mm->pinned_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += extra;
+
+	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_update_userpage(event);
 
@@ -3734,7 +3737,7 @@ unlock:
 	atomic_inc(&event->mmap_count);
 	mutex_unlock(&event->mmap_mutex);
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
 	return ret;