@@ -3544,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		struct ring_buffer *rb = event->rb;
 
 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->locked_vm -= event->mmap_locked;
+		vma->vm_mm->pinned_vm -= event->mmap_locked;
 		rcu_assign_pointer(event->rb, NULL);
 		mutex_unlock(&event->mmap_mutex);
 
@@ -3625,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
-	locked = vma->vm_mm->locked_vm + extra;
+	locked = vma->vm_mm->pinned_vm + extra;
 
 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
 		!capable(CAP_IPC_LOCK)) {
@@ -3651,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
 	event->mmap_user = get_current_user();
-	vma->vm_mm->locked_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += event->mmap_locked;
 
 unlock:
 	if (!ret)
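
For context (my note, not part of the patch): the hunks above move perf's ring-buffer
accounting from mm->locked_vm to mm->pinned_vm, but the pages are still charged
against the same RLIMIT_MEMLOCK ceiling; the separate counter just keeps pinned
pages from being conflated with mlock()ed ones. The check in perf_mmap() converts
the byte-denominated rlimit into pages (lock_limit >>= PAGE_SHIFT) before comparing.
A minimal userspace sketch of that same conversion, assuming a standard POSIX
environment (this is an illustration, not kernel code):

/* sketch: report RLIMIT_MEMLOCK in bytes and in pages, as perf_mmap() sees it */
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rlim;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) {
		perror("getrlimit");
		return 1;
	}

	/* RLIM_INFINITY means the (locked > lock_limit) check can never trip. */
	if (rlim.rlim_cur == RLIM_INFINITY)
		printf("RLIMIT_MEMLOCK: unlimited\n");
	else
		/* division by page size mirrors the kernel's >> PAGE_SHIFT */
		printf("RLIMIT_MEMLOCK: %llu bytes = %llu pages\n",
		       (unsigned long long)rlim.rlim_cur,
		       (unsigned long long)rlim.rlim_cur /
		       (unsigned long long)page_size);
	return 0;
}

Note that CAP_IPC_LOCK still bypasses the limit in perf_mmap(), so the sketch
only describes the path taken by unprivileged callers.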