@@ -966,7 +966,7 @@ no_page_table:
 	 * has touched so far, we don't want to allocate page tables.
 	 */
 	if (flags & FOLL_ANON) {
-		page = ZERO_PAGE(address);
+		page = ZERO_PAGE(0);
 		if (flags & FOLL_GET)
			get_page(page);
 		BUG_ON(flags & FOLL_WRITE);
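The address argument to ZERO_PAGE() only ever mattered on cache-aliasing architectures, which keep several differently coloured zero pages and pick one by user virtual address. Since this series stops mapping the zero page into user address space from the fault paths, follow_page() can pass a constant; get_user_pages() callers only touch the page through its kernel mapping. For context, a sketch of the two styles of definition, assuming the usual arrangement in each architecture's asm/pgtable.h (illustrative, not quoted verbatim from any one port):

  /* Most architectures: one global zero-filled page, argument ignored. */
  extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  #define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

  /*
   * Aliasing caches (e.g. MIPS): a block of zero pages, selected by
   * the colour bits of the faulting virtual address.
   */
  #define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))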
@@ -1111,95 +1111,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pte_t *pte;
-	spinlock_t *ptl;
-	int err = 0;
-
-	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-	if (!pte)
-		return -EAGAIN;
-	arch_enter_lazy_mmu_mode();
-	do {
-		struct page *page = ZERO_PAGE(addr);
-		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
-
-		if (unlikely(!pte_none(*pte))) {
-			err = -EEXIST;
-			pte++;
-			break;
-		}
-		page_cache_get(page);
-		page_add_file_rmap(page);
-		inc_mm_counter(mm, file_rss);
-		set_pte_at(mm, addr, pte, zero_pte);
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(pte - 1, ptl);
-	return err;
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pmd_t *pmd;
-	unsigned long next;
-	int err;
-
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		return -EAGAIN;
-	do {
-		next = pmd_addr_end(addr, end);
-		err = zeromap_pte_range(mm, pmd, addr, next, prot);
-		if (err)
-			break;
-	} while (pmd++, addr = next, addr != end);
-	return err;
-}
-
-static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pud_t *pud;
-	unsigned long next;
-	int err;
-
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
-		return -EAGAIN;
-	do {
-		next = pud_addr_end(addr, end);
-		err = zeromap_pmd_range(mm, pud, addr, next, prot);
-		if (err)
-			break;
-	} while (pud++, addr = next, addr != end);
-	return err;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long size, pgprot_t prot)
-{
-	pgd_t *pgd;
-	unsigned long next;
-	unsigned long end = addr + size;
-	struct mm_struct *mm = vma->vm_mm;
-	int err;
-
-	BUG_ON(addr >= end);
-	pgd = pgd_offset(mm, addr);
-	flush_cache_range(vma, addr, end);
-	do {
-		next = pgd_addr_end(addr, end);
-		err = zeromap_pud_range(mm, pgd, addr, next, prot);
-		if (err)
-			break;
-	} while (pgd++, addr = next, addr != end);
-	return err;
-}
-
 pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
 {
 	pgd_t * pgd = pgd_offset(mm, addr);
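With the zeromap_*() page-table walk gone, nothing pre-populates PTEs with the zero page any more: a private mapping of /dev/zero becomes plain anonymous memory and takes its zero fill from do_anonymous_page() at fault time. The driver side (touched elsewhere in this series) accordingly reduces to roughly the following sketch; the shared case still goes through shmem. Treat the exact form as an assumption, not a quotation:

  static int mmap_zero(struct file *file, struct vm_area_struct *vma)
  {
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);	/* shared: back with a shmem object */
	return 0;				/* private: ordinary anonymous vma */
  }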
@@ -1717,16 +1628,11 @@ gotten:
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	if (old_page == ZERO_PAGE(address)) {
-		new_page = alloc_zeroed_user_highpage_movable(vma, address);
-		if (!new_page)
-			goto oom;
-	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-		if (!new_page)
-			goto oom;
-		cow_user_page(new_page, old_page, address, vma);
-	}
+	VM_BUG_ON(old_page == ZERO_PAGE(0));
+	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+	if (!new_page)
+		goto oom;
+	cow_user_page(new_page, old_page, address, vma);
 
 	/*
 	 * Re-check the pte - we dropped the lock
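Because do_anonymous_page() below no longer installs the zero page, a copy-on-write fault can never find it as old_page; the branch that substituted a freshly zeroed page for a copy is dead code, and the VM_BUG_ON() merely documents that invariant. As a reminder of the cost model (definition paraphrased from include/linux/mm.h of this era, so treat the exact form as an assumption):

  #ifdef CONFIG_DEBUG_VM
  #define VM_BUG_ON(cond)	BUG_ON(cond)
  #else
  #define VM_BUG_ON(cond)	do { } while (0)
  #endif

so the check compiles away entirely on production builds.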
@@ -2252,39 +2158,24 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (write_access) {
-		/* Allocate our own private page. */
-		pte_unmap(page_table);
-
-		if (unlikely(anon_vma_prepare(vma)))
-			goto oom;
-		page = alloc_zeroed_user_highpage_movable(vma, address);
-		if (!page)
-			goto oom;
-
-		entry = mk_pte(page, vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	/* Allocate our own private page. */
+	pte_unmap(page_table);
 
-		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_none(*page_table))
-			goto release;
-		inc_mm_counter(mm, anon_rss);
-		lru_cache_add_active(page);
-		page_add_new_anon_rmap(page, vma, address);
-	} else {
-		/* Map the ZERO_PAGE - vm_page_prot is readonly */
-		page = ZERO_PAGE(address);
-		page_cache_get(page);
-		entry = mk_pte(page, vma->vm_page_prot);
+	if (unlikely(anon_vma_prepare(vma)))
+		goto oom;
+	page = alloc_zeroed_user_highpage_movable(vma, address);
+	if (!page)
+		goto oom;
 
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		if (!pte_none(*page_table))
-			goto release;
-		inc_mm_counter(mm, file_rss);
-		page_add_file_rmap(page);
-	}
+	entry = mk_pte(page, vma->vm_page_prot);
+	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
+	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!pte_none(*page_table))
+		goto release;
+	inc_mm_counter(mm, anon_rss);
+	lru_cache_add_active(page);
+	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */
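User-visible semantics are unchanged by making every anonymous fault allocate a real page: untouched private anonymous memory still reads as zeroes, and the first write still gives the process its own page; only the read-fault path now pays for an allocation instead of mapping the shared zero page. A minimal userspace check of those two properties (a sketch, assuming only POSIX mmap plus Linux's MAP_ANONYMOUS):

  #include <assert.h>
  #include <sys/mman.h>

  int main(void)
  {
	char *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(p != MAP_FAILED);
	assert(p[0] == 0);	/* read fault: zero-filled either way */
	p[0] = 1;		/* write fault: private writable page */
	assert(p[0] == 1);
	return 0;
  }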