
x86: c_p_a() make it more robust against use of PAT bits

Use the page table level instead of the PSE bit to check whether the
PTE maps a 4K page. This makes the code more robust when the PAT bit
is in use, because on 4K PTEs the PAT bit sits in the same position
as the PSE bit.

Signed-off-by: Andi Kleen <ak@suse.de>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Andi Kleen, 17 years ago
commit 895bdc2995
2 changed files, 4 insertions(+), 4 deletions(-):
    arch/x86/mm/pageattr_32.c (+2 -2)
    arch/x86/mm/pageattr_64.c (+2 -2)
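For context, the ambiguity this patch removes comes from the x86 page table
format itself: in a 4K PTE, bit 7 is the PAT bit, while in PMD/PUD entries the
same bit 7 is PSE ("this entry maps a large page"). The minimal user-space
sketch below illustrates that overlap; the macro names and the looks_huge()
helper are illustrative only and are not the kernel's pte_huge()
implementation, though the bit positions match the architecture.

/*
 * Sketch (not kernel code) of why testing the PSE bit is ambiguous
 * once the PAT bit on 4K PTEs is in use.
 */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_BIT_PSE 7  /* "large page", meaningful only above PTE level */
#define _PAGE_BIT_PAT 7  /* PAT on 4K PTEs -- same bit position as PSE    */

/* What a pte_huge()-style bit test effectively does. */
static int looks_huge(uint64_t entry)
{
	return (int)((entry >> _PAGE_BIT_PSE) & 1);
}

int main(void)
{
	/* A present 4K PTE whose PAT bit is set (e.g. a non-default memory type). */
	uint64_t pte_4k_with_pat = (1ULL << _PAGE_BIT_PAT) | 0x1;

	/*
	 * The bit test misreads this 4K PTE as a huge mapping; only the
	 * page table level at which the entry was found (what the patch
	 * switches to) disambiguates the two cases.
	 */
	printf("bit test says huge: %d\n", looks_huge(pte_4k_with_pat));
	return 0;
}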

arch/x86/mm/pageattr_32.c (+2 -2)

@@ -172,7 +172,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 	BUG_ON(PageCompound(kpte_page));
 
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
-		if (!pte_huge(*kpte)) {
+		if (level == 3) {
 			set_pte_atomic(kpte, mk_pte(page, prot));
 		} else {
 			struct page *split;
@@ -190,7 +190,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 		}
 		page_private(kpte_page)++;
 	} else {
-		if (!pte_huge(*kpte)) {
+		if (level == 3) {
 			set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
 			BUG_ON(page_private(kpte_page) == 0);
 			page_private(kpte_page)--;

arch/x86/mm/pageattr_64.c (+2 -2)

@@ -153,7 +153,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-		if (!pte_huge(*kpte)) {
+		if (level == 4) {
 			set_pte(kpte, pfn_pte(pfn, prot));
 		} else {
 			/*
@@ -172,7 +172,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		}
 		page_private(kpte_page)++;
 	} else {
-		if (!pte_huge(*kpte)) {
+		if (level == 4) {
 			set_pte(kpte, pfn_pte(pfn, ref_prot));
 			BUG_ON(page_private(kpte_page) == 0);
 			page_private(kpte_page)--;