@@ -27,6 +27,29 @@ static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 
+static void clear_huge_page(struct page *page, unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr);
+	}
+}
+
+static void copy_huge_page(struct page *dst, struct page *src,
+			   unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+	}
+}
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
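
For reference, the helpers follow a common pattern: do the work one base page at a time, with an explicit reschedule point between chunks so a large clear/copy does not hog the CPU. Below is a minimal userspace sketch of that pattern, not part of the patch: sched_yield() stands in for cond_resched(), and the 2MB huge page size and clear_huge_buffer() name are illustrative assumptions.

#include <sched.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE  4096UL
#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed 2MB huge page */

/* Clear one "huge page" a base page at a time, yielding between
 * chunks the way clear_huge_page() calls cond_resched(). */
static void clear_huge_buffer(char *buf)
{
	unsigned long i;

	for (i = 0; i < HPAGE_SIZE / PAGE_SIZE; i++) {
		sched_yield();	/* stands in for cond_resched() */
		memset(buf + i * PAGE_SIZE, 0, PAGE_SIZE);
	}
}

int main(void)
{
	char *buf = aligned_alloc(PAGE_SIZE, HPAGE_SIZE);

	if (!buf)
		return 1;
	clear_huge_buffer(buf);
	free(buf);
	return 0;
}
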
@@ -98,7 +121,6 @@ void free_huge_page(struct page *page)
 struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *page;
-	int i;
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page(vma, addr);
@@ -108,8 +130,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_refcounted(page);
-	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-		clear_user_highpage(&page[i], addr);
 	return page;
 }
 
@@ -367,7 +387,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte)
 {
 	struct page *old_page, *new_page;
-	int i, avoidcopy;
+	int avoidcopy;
 
 	old_page = pte_page(pte);
 
@@ -388,9 +408,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	spin_unlock(&mm->page_table_lock);
-	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
-		copy_user_highpage(new_page + i, old_page + i,
-				   address + i*PAGE_SIZE);
+	copy_huge_page(new_page, old_page, address);
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -435,6 +453,7 @@ retry:
 		ret = VM_FAULT_OOM;
 		goto out;
 	}
+	clear_huge_page(page, address);
 
 	if (vma->vm_flags & VM_SHARED) {
 		int err;
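
With this last hunk, a huge page is cleared at fault time rather than at allocation. A hedged usage sketch, not part of the patch: the first touch of an anonymous hugetlb mapping takes the fault path patched above, so the page arrives zeroed. MAP_HUGETLB is assumed available (it postdates this patch; older kernels need a hugetlbfs mount instead), and the 2MB size is an assumption.

#include <stdio.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* e.g. no huge pages reserved */
		return 1;
	}
	p[0] = 1;	/* first touch: faults in a freshly cleared huge page */
	printf("byte at offset 4096: %d\n", p[4096]);	/* prints 0 */
	munmap(p, HPAGE_SIZE);
	return 0;
}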