@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- * 	down_write(&mm->mmap_sem);
+ *	down_write(&mm->mmap_sem);
  * or
- * 	down_read(&mm->mmap_sem);
- * 	mutex_lock(&hugetlb_instantiation_mutex);
+ *	down_read(&mm->mmap_sem);
+ *	mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
 	struct list_head link;
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
 	for (i = 0; i < pages_per_huge_page(h); i++) {
-		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-				1 << PG_private | 1<< PG_writeback);
+		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+				1 << PG_referenced | 1 << PG_dirty |
+				1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1 << PG_writeback);
 	}
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
@@ -591,7 +592,6 @@ int PageHuge(struct page *page)
 
 	return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -2132,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
 		update_mmu_cache(vma, address, ptep);
-	}
 }
 
 
@@ -2189,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
 	if (huge_pte_none(pte) || pte_present(pte))
 		return 0;
 	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp)) {
+	if (non_swap_entry(swp) && is_migration_entry(swp))
 		return 1;
-	} else
+	else
 		return 0;
 }
 
@@ -2202,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 	if (huge_pte_none(pte) || pte_present(pte))
 		return 0;
 	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
 		return 1;
-	} else
+	else
 		return 0;
 }
 
@@ -2567,7 +2566,7 @@ retry:
 		 * So we need to block hugepage fault by PG_hwpoison bit check.
 		 */
 		if (unlikely(PageHWPoison(page))) {
-			ret = VM_FAULT_HWPOISON | 
+			ret = VM_FAULT_HWPOISON |
 				VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
@@ -2635,7 +2634,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			migration_entry_wait(mm, (pmd_t *)ptep, address);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-			return VM_FAULT_HWPOISON_LARGE | 
+			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(h - hstates);
 	}
 
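
Note, not part of the patch: the locking rule restated in the @@ -62,10 hunk above
reduces to two caller-side patterns. The following is a minimal sketch of both, not
this file's actual fault path; in this kernel version hugetlb_instantiation_mutex is
static to mm/hugetlb.c, so the mutex half is only taken from within hugetlb_fault()
itself, and the lock/unlock pairs are shown bare for illustration:

	/* Exclusive path: hold mmap_sem for write. */
	down_write(&mm->mmap_sem);
	/* ... instantiate the huge page ... */
	up_write(&mm->mmap_sem);

	/*
	 * Shared path: hold mmap_sem for read and serialise against
	 * concurrent hugetlb faults with the instantiation mutex.
	 */
	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);
	/* ... instantiate the huge page ... */
	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);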