@@ -24,7 +24,7 @@
  *   inode->i_alloc_sem (vmtruncate_range)
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_lock
+ *       mapping->i_mmap_mutex
  *         anon_vma->lock
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -646,14 +646,14 @@ static int page_referenced_file(struct page *page,
 	 * The page lock not only makes sure that page->mapping cannot
 	 * suddenly be NULLified by truncation, it makes sure that the
 	 * structure at mapping cannot be freed and reused yet,
-	 * so we can safely take mapping->i_mmap_lock.
+	 * so we can safely take mapping->i_mmap_mutex.
 	 */
 	BUG_ON(!PageLocked(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 
 	/*
-	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
+	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
 	 * is more likely to be accurate if we note it after spinning.
 	 */
 	mapcount = page_mapcount(page);
@@ -675,7 +675,7 @@ static int page_referenced_file(struct page *page,
 			break;
 	}
 
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return referenced;
 }
 
@@ -762,7 +762,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 
 	BUG_ON(PageAnon(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
@@ -771,7 +771,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
@@ -1119,7 +1119,7 @@ out_mlock:
 	/*
 	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 	 * unstable result and race. Plus, We can't wait here because
-	 * we now hold anon_vma->lock or mapping->i_mmap_lock.
+	 * we now hold anon_vma->lock or mapping->i_mmap_mutex.
 	 * if trylock failed, the page remain in evictable lru and later
 	 * vmscan could retry to move the page to unevictable lru if the
 	 * page is actually mlocked.
@@ -1345,7 +1345,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
@@ -1391,7 +1391,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mapcount = page_mapcount(page);
 	if (!mapcount)
 		goto out;
-	cond_resched_lock(&mapping->i_mmap_lock);
+	cond_resched();
 
 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
 	if (max_nl_cursor == 0)
@@ -1413,7 +1413,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 			}
 			vma->vm_private_data = (void *) max_nl_cursor;
 		}
-		cond_resched_lock(&mapping->i_mmap_lock);
+		cond_resched();
 		max_nl_cursor += CLUSTER_SIZE;
 	} while (max_nl_cursor <= max_nl_size);
 
@@ -1425,7 +1425,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
 		vma->vm_private_data = NULL;
 out:
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
@@ -1544,7 +1544,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 
 	if (!mapping)
 		return ret;
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
@@ -1558,7 +1558,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	 * never contain migration ptes. Decide what to do about this
 	 * limitation to linear when we need rmap_walk() on nonlinear.
 	 */
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
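For orientation, here is a minimal userspace sketch (not part of the patch) of the locking pattern the hunks above converge on: take mapping->i_mmap_mutex with mutex_lock(), walk the i_mmap prio tree, and release it with mutex_unlock(). Because a mutex, unlike the old spinlock, may be held across a sleep, the cond_resched_lock(&mapping->i_mmap_lock) calls become plain cond_resched(). This is an analogy only, assuming pthreads; pthread_mutex_t, sched_yield() and walk_under_mutex() are illustrative stand-ins for the kernel primitives, not kernel APIs.

/*
 * Userspace analogy (assumption: pthreads available; none of this is
 * kernel code).  With a spinlock, yielding the CPU required dropping
 * the lock first, which is what cond_resched_lock() did.  With a
 * sleeping lock such as a mutex, the holder may yield while still
 * holding it, so a bare cond_resched() -- modelled here by
 * sched_yield() -- is sufficient.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* stand-in for mapping->i_mmap_mutex */
static pthread_mutex_t i_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Walk a shared structure under the mutex, yielding between items. */
static void walk_under_mutex(int nr_items)
{
	pthread_mutex_lock(&i_mmap_mutex);	/* like mutex_lock(&mapping->i_mmap_mutex) */
	for (int i = 0; i < nr_items; i++) {
		printf("visiting item %d\n", i);	/* e.g. one vma in the prio tree */
		sched_yield();			/* cond_resched(): no unlock/relock needed */
	}
	pthread_mutex_unlock(&i_mmap_mutex);	/* like mutex_unlock(&mapping->i_mmap_mutex) */
}

int main(void)
{
	walk_under_mutex(3);
	return 0;
}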