
Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: pat: Remove ioremap_default()
  x86: pat: Clean up req_type special case for reserve_memtype()
  x86: Relegate CONFIG_PAT and CONFIG_MTRR configurability to EMBEDDED
Linus Torvalds, 15 years ago
commit b391738bd1
3 changed files with 7 additions and 33 deletions:
  1. arch/x86/Kconfig       (+5, -2)
  2. arch/x86/mm/ioremap.c  (+1, -25)
  3. arch/x86/mm/pat.c      (+1, -6)

arch/x86/Kconfig (+5, -2)

@@ -1332,7 +1332,9 @@ config MATH_EMULATION
 	  kernel, it won't hurt.
 
 config MTRR
-	bool "MTRR (Memory Type Range Register) support"
+	bool
+	default y
+	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1398,7 +1400,8 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 
 config X86_PAT
 	bool
-	prompt "x86 PAT support"
+	default y
+	prompt "x86 PAT support" if EMBEDDED
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.

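A side note on the X86_PAT help text above: PAT is what provides page-level cache attributes such as write-combining, and with CONFIG_X86_PAT now defaulting to y, interfaces like ioremap_wc() are available on typical configurations instead of falling back to an uncached mapping. The sketch below is not part of this patch; the address, size, and function name are made up for illustration.

    #include <linux/io.h>

    /* Hypothetical MMIO range, used only for illustration. */
    #define EXAMPLE_FB_PHYS	0xd0000000UL
    #define EXAMPLE_FB_SIZE	0x100000UL

    static void __iomem *example_map_wc(void)
    {
    	/* PAT supplies the write-combining memory type; when PAT is
    	 * disabled, ioremap_wc() degrades to an uncached mapping. */
    	return ioremap_wc(EXAMPLE_FB_PHYS, EXAMPLE_FB_SIZE);
    }
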
arch/x86/mm/ioremap.c (+1, -25)

@@ -281,30 +281,6 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-static void __iomem *ioremap_default(resource_size_t phys_addr,
-					unsigned long size)
-{
-	unsigned long flags;
-	void __iomem *ret;
-	int err;
-
-	/*
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from confliting mappings otherwise
-	 */
-	err = reserve_memtype(phys_addr, phys_addr + size,
-				_PAGE_CACHE_WB, &flags);
-	if (err < 0)
-		return NULL;
-
-	ret = __ioremap_caller(phys_addr, size, flags,
-			       __builtin_return_address(0));
-
-	free_memtype(phys_addr, phys_addr + size);
-	return ret;
-}
-
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 				unsigned long prot_val)
 {
@@ -380,7 +356,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
+	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
 	if (addr)
 		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 

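With ioremap_default() removed, xlate_dev_mem_ptr() above maps non-RAM /dev/mem pages with ioremap_cache(), which requests a write-back type and ends up with whatever compatible type reserve_memtype() grants. A minimal sketch of the same pattern from a caller's point of view (not from this patch; example_peek_phys() is a hypothetical helper):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical helper: map one page around a physical address with a
     * cacheable mapping, read a byte through it, then tear the mapping down. */
    static int example_peek_phys(unsigned long phys)
    {
    	void __iomem *addr;
    	u8 val;

    	addr = ioremap_cache(phys & PAGE_MASK, PAGE_SIZE);
    	if (!addr)
    		return -ENOMEM;

    	val = readb(addr + (phys & ~PAGE_MASK));
    	iounmap(addr);

    	return val;
    }
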
arch/x86/mm/pat.c (+1, -6)

@@ -356,9 +356,6 @@ static int free_ram_pages_type(u64 start, u64 end)
  * - _PAGE_CACHE_UC_MINUS
  * - _PAGE_CACHE_UC
  *
- * req_type will have a special case value '-1', when requester want to inherit
- * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
- *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
@@ -378,9 +375,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == -1)
-				*new_type = _PAGE_CACHE_WB;
-			else if (req_type == _PAGE_CACHE_WC)
+			if (req_type == _PAGE_CACHE_WC)
 				*new_type = _PAGE_CACHE_UC_MINUS;
 			else
 				*new_type = req_type & _PAGE_CACHE_MASK;
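
With the '-1' special case gone, reserve_memtype() callers always pass an explicit _PAGE_CACHE_* request type. A rough sketch of such a call, modelled on the deleted ioremap_default() shown above (example_reserve_wb() is hypothetical; error handling is abbreviated, and the declarations are assumed to come from <asm/pat.h> and <asm/pgtable_types.h>):

    #include <linux/types.h>
    #include <asm/pat.h>
    #include <asm/pgtable_types.h>

    /* Hypothetical: ask PAT for a write-back type over [start, start + size)
     * and drop the reservation again, as the deleted ioremap_default() did. */
    static int example_reserve_wb(u64 start, u64 size)
    {
    	unsigned long flags;
    	int err;

    	/* 'flags' returns the type actually granted, e.g. _PAGE_CACHE_UC_MINUS
    	 * when the range cannot safely be mapped write-back. */
    	err = reserve_memtype(start, start + size, _PAGE_CACHE_WB, &flags);
    	if (err < 0)
    		return err;

    	/* ... set up the mapping using 'flags' ... */

    	free_memtype(start, start + size);
    	return 0;
    }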