
Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, pat: Fix cacheflush address in change_page_attr_set_clr()
  mm: remove !NUMA condition from PAGEFLAGS_EXTENDED condition set
  x86: Fix earlyprintk=dbgp for machines without NX
  x86, pat: Sanity check remap_pfn_range for RAM region
  x86, pat: Lookup the protection from memtype list on vm_insert_pfn()
  x86, pat: Add lookup_memtype to get the current memtype of a paddr
  x86, pat: Use page flags to track memtypes of RAM pages
  x86, pat: Generalize the use of page flag PG_uncached
  x86, pat: Add rbtree to do quick lookup in memtype tracking
  x86, pat: Add PAT reserve free to io_mapping* APIs
  x86, pat: New i/f for driver to request memtype for IO regions
  x86, pat: ioremap to follow same PAT restrictions as other PAT users
  x86, pat: Keep identity maps consistent with mmaps even when pat_disabled
  x86, mtrr: make mtrr_aps_delayed_init static bool
  x86, pat/mtrr: Rendezvous all the cpus for MTRR/PAT init
  generic-ipi: Allow cpus not yet online to call smp_call_function with irqs disabled
  x86: Fix an incorrect argument of reserve_bootmem()
  x86: Fix system crash when loading with "reservetop" parameter
Linus Torvalds 15 years ago
parent
commit
227423904c

+ 4 - 0
arch/ia64/Kconfig

@@ -112,6 +112,10 @@ config IA64_UNCACHED_ALLOCATOR
 	bool
 	select GENERIC_ALLOCATOR
 
+config ARCH_USES_PG_UNCACHED
+	def_bool y
+	depends on IA64_UNCACHED_ALLOCATOR
+
 config AUDIT_ARCH
 	bool
 	default y

+ 4 - 0
arch/x86/Kconfig

@@ -1417,6 +1417,10 @@ config X86_PAT
 
 	  If unsure, say Y.
 
+config ARCH_USES_PG_UNCACHED
+	def_bool y
+	depends on X86_PAT
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI

+ 52 - 2
arch/x86/include/asm/cacheflush.h

@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_non_WB				PG_arch_1
-PAGEFLAG(NonWB, non_WB)
+#define PG_WC				PG_arch_1
+PAGEFLAG(WC, WC)
+
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * memory type of pages that have backing page struct. X86 PAT supports 3
+ * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
+ * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_UC here.
+ *
+ * Caller must hold memtype_lock for atomicity.
+ */
+static inline unsigned long get_page_memtype(struct page *pg)
+{
+	if (!PageUncached(pg) && !PageWC(pg))
+		return -1;
+	else if (!PageUncached(pg) && PageWC(pg))
+		return _PAGE_CACHE_WC;
+	else if (PageUncached(pg) && !PageWC(pg))
+		return _PAGE_CACHE_UC_MINUS;
+	else
+		return _PAGE_CACHE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+{
+	switch (memtype) {
+	case _PAGE_CACHE_WC:
+		ClearPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	case _PAGE_CACHE_UC_MINUS:
+		SetPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	case _PAGE_CACHE_WB:
+		SetPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	default:
+	case -1:
+		ClearPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	}
+}
+#else
+static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
+static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+#endif
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
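
As an aside, a minimal user-space sketch of the same two-flag encoding may help when reading get_page_memtype()/set_page_memtype() above; the PAGE_CACHE_* values and the struct below are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in values; the real _PAGE_CACHE_* constants live in the kernel. */
enum { PAGE_CACHE_WB, PAGE_CACHE_WC, PAGE_CACHE_UC_MINUS };

struct fake_page {
	bool uncached;	/* models PG_uncached */
	bool wc;	/* models PG_arch_1, aliased as PG_WC above */
};

/* Same four-state decode as get_page_memtype(). */
static long get_memtype(const struct fake_page *pg)
{
	if (!pg->uncached && !pg->wc)
		return -1;		/* default, never reserved */
	if (!pg->uncached && pg->wc)
		return PAGE_CACHE_WC;
	if (pg->uncached && !pg->wc)
		return PAGE_CACHE_UC_MINUS;
	return PAGE_CACHE_WB;		/* both flags set */
}

/* Same encode as set_page_memtype(); anything else resets to the default. */
static void set_memtype(struct fake_page *pg, long memtype)
{
	pg->uncached = (memtype == PAGE_CACHE_UC_MINUS || memtype == PAGE_CACHE_WB);
	pg->wc = (memtype == PAGE_CACHE_WC || memtype == PAGE_CACHE_WB);
}

int main(void)
{
	struct fake_page pg = { false, false };

	printf("default memtype: %ld\n", get_memtype(&pg));	/* -1 */
	set_memtype(&pg, PAGE_CACHE_WC);
	printf("after WC:        %ld\n", get_memtype(&pg));	/* PAGE_CACHE_WC */
	set_memtype(&pg, -1);
	printf("after free:      %ld\n", get_memtype(&pg));	/* -1 */
	return 0;
}

The point to take away is that clearing both flags is the "never reserved" state (-1), which is exactly what free_ram_pages_type() in pat.c resets pages to.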

+ 6 - 3
arch/x86/include/asm/iomap.h

@@ -26,13 +26,16 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
-
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
 iounmap_atomic(void *kvaddr, enum km_type type);
 
+int
+iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+void
+iomap_free(resource_size_t base, unsigned long size);
+
 #endif /* _ASM_X86_IOMAP_H */

+ 6 - 0
arch/x86/include/asm/mtrr.h

@@ -121,6 +121,9 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
 #  else
@@ -161,6 +164,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
 #  endif
 
 #ifdef CONFIG_COMPAT

+ 5 - 0
arch/x86/include/asm/pat.h

@@ -19,4 +19,9 @@ extern int free_memtype(u64 start, u64 end);
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
 
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type);
+
+void io_free_memtype(resource_size_t start, resource_size_t end);
+
 #endif /* _ASM_X86_PAT_H */

+ 37 - 9
arch/x86/kernel/cpu/mtrr/main.c

@@ -58,6 +58,7 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
+static bool mtrr_aps_delayed_init;
 
 static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
 
@@ -163,7 +164,10 @@ static void ipi_handler(void *info)
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else {
+	} else if (mtrr_aps_delayed_init) {
+		/*
+		 * Initialize the MTRRs inaddition to the synchronisation.
+		 */
 		mtrr_if->set_all();
 	}
 
@@ -265,6 +269,8 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 	 */
 	if (reg != ~0U)
 		mtrr_if->set(reg, base, size, type);
+	else if (!mtrr_aps_delayed_init)
+		mtrr_if->set_all();
 
 	/* Wait for the others */
 	while (atomic_read(&data.count))
@@ -721,9 +727,7 @@ void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-	unsigned long flags;
-
-	if (!mtrr_if || !use_intel())
+	if (!use_intel() || mtrr_aps_delayed_init)
 		return;
 	/*
 	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
@@ -738,11 +742,7 @@ void mtrr_ap_init(void)
 	 *   2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
 	 *      lock to prevent mtrr entry changes
 	 */
-	local_irq_save(flags);
-
-	mtrr_if->set_all();
-
-	local_irq_restore(flags);
+	set_mtrr(~0U, 0, 0, 0);
 }
 
 /**
@@ -753,6 +753,34 @@ void mtrr_save_state(void)
 	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
+void set_mtrr_aps_delayed_init(void)
+{
+	if (!use_intel())
+		return;
+
+	mtrr_aps_delayed_init = true;
+}
+
+/*
+ * MTRR initialization for all AP's
+ */
+void mtrr_aps_init(void)
+{
+	if (!use_intel())
+		return;
+
+	set_mtrr(~0U, 0, 0, 0);
+	mtrr_aps_delayed_init = false;
+}
+
+void mtrr_bp_restore(void)
+{
+	if (!use_intel())
+		return;
+
+	mtrr_if->set_all();
+}
+
 static int __init mtrr_init_finialize(void)
 {
 	if (!mtrr_if)

+ 15 - 5
arch/x86/kernel/setup.c

@@ -712,6 +712,21 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
+	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = command_line;
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Must call this twice: Once just to detect whether hardware doesn't
+	 * support NX (so that the early EHCI debug console setup can safely
+	 * call set_fixmap(), and then again after parsing early parameters to
+	 * honor the respective command line option.
+	 */
+	check_efer();
+#endif
+
+	parse_early_param();
+
 	/* VMI may relocate the fixmap; do this before touching ioremap area */
 	vmi_init();
 
@@ -794,11 +809,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
-	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-	*cmdline_p = command_line;
-
-	parse_early_param();
-
 #ifdef CONFIG_X86_64
 	check_efer();
 #endif

+ 14 - 0
arch/x86/kernel/smpboot.c

@@ -1118,9 +1118,22 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 	if (is_uv_system())
 		uv_system_init();
+
+	set_mtrr_aps_delayed_init();
 out:
 	preempt_enable();
 }
+
+void arch_enable_nonboot_cpus_begin(void)
+{
+	set_mtrr_aps_delayed_init();
+}
+
+void arch_enable_nonboot_cpus_end(void)
+{
+	mtrr_aps_init();
+}
+
 /*
  * Early setup to make printk work.
  */
@@ -1142,6 +1155,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	setup_ioapic_dest();
 #endif
 	check_nmi_watchdog();
+	mtrr_aps_init();
 }
 
 static int __initdata setup_possible_cpus = -1;

+ 25 - 2
arch/x86/mm/iomap_32.c

@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
 
-int is_io_mapping_possible(resource_size_t base, unsigned long size)
+static int is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
 #if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
 	/* There is no way to map greater than 1 << 32 address without PAE */
@@ -30,7 +30,30 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
 #endif
 	return 1;
 }
-EXPORT_SYMBOL_GPL(is_io_mapping_possible);
+
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
+{
+	unsigned long flag = _PAGE_CACHE_WC;
+	int ret;
+
+	if (!is_io_mapping_possible(base, size))
+		return -EINVAL;
+
+	ret = io_reserve_memtype(base, base + size, &flag);
+	if (ret)
+		return ret;
+
+	*prot = __pgprot(__PAGE_KERNEL | flag);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_create_wc);
+
+void
+iomap_free(resource_size_t base, unsigned long size)
+{
+	io_free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(iomap_free);
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
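
For orientation, a hedged kernel-side sketch of how a caller could pair the two new exports above with the existing atomic iomap helpers declared in iomap.h; the base/size values and the KM_USER0 slot choice are illustrative assumptions, not part of this commit.

/* Hypothetical helper: reserve an aperture as WC, touch its first page, release. */
static int poke_aperture_wc(resource_size_t base, unsigned long size)
{
	pgprot_t prot;
	void *vaddr;
	int ret;

	/* Take the PAT reservation; prot comes back as WC or a compatible fallback. */
	ret = iomap_create_wc(base, size, &prot);
	if (ret)
		return ret;

	/* Atomically map the first page with the protection handed back. */
	vaddr = iomap_atomic_prot_pfn(base >> PAGE_SHIFT, KM_USER0, prot);
	/* ... write through vaddr here ... */
	iounmap_atomic(vaddr, KM_USER0);

	/* Drop the reservation taken by iomap_create_wc(). */
	iomap_free(base, size);
	return 0;
}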

+ 4 - 14
arch/x86/mm/ioremap.c

@@ -158,24 +158,14 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
-		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}
 
 	if (prot_val != new_prot_val) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uc-, return cannot be write-back
-		 * - request is uc-, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
-		     (new_prot_val == _PAGE_CACHE_WB ||
-		      new_prot_val == _PAGE_CACHE_WC)) ||
-		    (prot_val == _PAGE_CACHE_WC &&
-		     new_prot_val == _PAGE_CACHE_WB)) {
-			pr_debug(
+		if (!is_new_memtype_allowed(phys_addr, size,
+					    prot_val, new_prot_val)) {
+			printk(KERN_ERR
 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),

+ 7 - 1
arch/x86/mm/pageattr.c

@@ -822,6 +822,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
+	unsigned long baddr = 0;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -853,6 +854,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			 */
 			WARN_ON_ONCE(1);
 		}
+		/*
+		 * Save address for cache flush. *addr is modified in the call
+		 * to __change_page_attr_set_clr() below.
+		 */
+		baddr = *addr;
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -900,7 +906,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
 		} else
-			cpa_flush_range(*addr, numpages, cache);
+			cpa_flush_range(baddr, numpages, cache);
 	} else
 		cpa_flush_all(cache);
 

+ 264 - 89
arch/x86/mm/pat.c

@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -148,11 +149,10 @@ static char *cattr_name(unsigned long flags)
  * areas). All the aliases have the same cache attributes of course.
  * Zero attributes are represented as holes.
  *
- * Currently the data structure is a list because the number of mappings
- * are expected to be relatively small. If this should be a problem
- * it could be changed to a rbtree or similar.
+ * The data structure is a list that is also organized as an rbtree
+ * sorted on the start address of memtype range.
 *
- * memtype_lock protects the whole list.
+ * memtype_lock protects both the linear list and rbtree.
  */
 
 struct memtype {
@@ -160,11 +160,53 @@ struct memtype {
 	u64			end;
 	unsigned long		type;
 	struct list_head	nd;
+	struct rb_node		rb;
 };
 
+static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
 
+static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
+{
+	struct rb_node *node = root->rb_node;
+	struct memtype *last_lower = NULL;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		if (data->start < start) {
+			last_lower = data;
+			node = node->rb_right;
+		} else if (data->start > start) {
+			node = node->rb_left;
+		} else
+			return data;
+	}
+
+	/* Will return NULL if there is no entry with its start <= start */
+	return last_lower;
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct memtype *this = container_of(*new, struct memtype, rb);
+
+		parent = *new;
+		if (data->start <= this->start)
+			new = &((*new)->rb_left);
+		else if (data->start > this->start)
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&data->rb, parent, new);
+	rb_insert_color(&data->rb, root);
+}
+
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
  * the resulting memory type as PAT understands it.
@@ -218,9 +260,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 	return -EBUSY;
 }
 
-static struct memtype *cached_entry;
-static u64 cached_start;
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
 	int ram_page = 0, not_rampage = 0;
@@ -249,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two pass:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
 *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
 */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
@@ -339,6 +376,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		if (new_type) {
 			if (req_type == -1)
 				*new_type = _PAGE_CACHE_WB;
+			else if (req_type == _PAGE_CACHE_WC)
+				*new_type = _PAGE_CACHE_UC_MINUS;
 			else
 				*new_type = req_type & _PAGE_CACHE_MASK;
 		}
@@ -364,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -380,17 +424,19 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	if (cached_entry && start >= cached_start)
-		entry = cached_entry;
-	else
+	entry = memtype_rb_search(&memtype_rbroot, new->start);
+	if (likely(entry != NULL)) {
+		/* To work correctly with list_for_each_entry_continue */
+		entry = list_entry(entry->nd.prev, struct memtype, nd);
+	} else {
 		entry = list_entry(&memtype_list, struct memtype, nd);
+	}
 
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
 	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
-			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -398,8 +444,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
-				cached_entry = list_entry(where,
-							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -407,8 +451,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				cached_entry = list_entry(entry->nd.prev,
-							struct memtype, nd);
 
 				/*
 				 * Move to right position in the linked
@@ -436,13 +478,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
-	cached_start = start;
-
 	if (where)
 		list_add(&new->nd, where);
 	else
 		list_add_tail(&new->nd, &memtype_list);
 
+	memtype_rb_insert(&memtype_rbroot, new);
+
 	spin_unlock(&memtype_lock);
 
 	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -454,7 +496,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *entry;
+	struct memtype *entry, *saved_entry;
 	int err = -EINVAL;
 	int is_range_ram;
 
@@ -466,23 +508,58 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, start);
+	if (unlikely(entry == NULL))
+		goto unlock_ret;
+
+	/*
+	 * Saved entry points to an entry with start same or less than what
+	 * we searched for. Now go through the list in both directions to look
+	 * for the entry that matches with both start and end, with list stored
+	 * in sorted start address
+	 */
+	saved_entry = entry;
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
-			if (cached_entry == entry || cached_start == start)
-				cached_entry = NULL;
+			rb_erase(&entry->rb, &memtype_rbroot);
+			list_del(&entry->nd);
+			kfree(entry);
+			err = 0;
+			break;
+		} else if (entry->start > start) {
+			break;
+		}
+	}
+
+	if (!err)
+		goto unlock_ret;
 
+	entry = saved_entry;
+	list_for_each_entry_reverse(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			rb_erase(&entry->rb, &memtype_rbroot);
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
 			break;
+		} else if (entry->start < start) {
+			break;
 		}
 	}
+unlock_ret:
 	spin_unlock(&memtype_lock);
 
 	if (err) {
@@ -496,6 +573,101 @@ int free_memtype(u64 start, u64 end)
 }
 
 
+/**
+ * lookup_memtype - Looksup the memory type for a physical address
+ * @paddr: physical address of which memory type needs to be looked up
+ *
+ * Only to be called when PAT is enabled
+ *
+ * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
+ * _PAGE_CACHE_UC
+ */
+static unsigned long lookup_memtype(u64 paddr)
+{
+	int rettype = _PAGE_CACHE_WB;
+	struct memtype *entry;
+
+	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+		return rettype;
+
+	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
+		struct page *page;
+		spin_lock(&memtype_lock);
+		page = pfn_to_page(paddr >> PAGE_SHIFT);
+		rettype = get_page_memtype(page);
+		spin_unlock(&memtype_lock);
+		/*
+		 * -1 from get_page_memtype() implies RAM page is in its
+		 * default state and not reserved, and hence of type WB
+		 */
+		if (rettype == -1)
+			rettype = _PAGE_CACHE_WB;
+
+		return rettype;
+	}
+
+	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, paddr);
+	if (entry != NULL)
+		rettype = entry->type;
+	else
+		rettype = _PAGE_CACHE_UC_MINUS;
+
+	spin_unlock(&memtype_lock);
+	return rettype;
+}
+
+/**
+ * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ * @type: A pointer to memtype, with requested type. On success, requested
+ * or any other compatible type that was available for the region is returned
+ *
+ * On success, returns 0
+ * On failure, returns non-zero
+ */
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type)
+{
+	resource_size_t size = end - start;
+	unsigned long req_type = *type;
+	unsigned long new_type;
+	int ret;
+
+	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
+
+	ret = reserve_memtype(start, end, req_type, &new_type);
+	if (ret)
+		goto out_err;
+
+	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+		goto out_free;
+
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+		goto out_free;
+
+	*type = new_type;
+	return 0;
+
+out_free:
+	free_memtype(start, end);
+	ret = -EBUSY;
+out_err:
+	return ret;
+}
+
+/**
+ * io_free_memtype - Release a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ */
+void io_free_memtype(resource_size_t start, resource_size_t end)
+{
+	free_memtype(start, end);
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -577,7 +749,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (!pat_enabled || base >= __pa(high_memory))
+	if (base >= __pa(high_memory))
 		return 0;
 
 	id_sz = (__pa(high_memory) < base + size) ?
@@ -612,11 +784,29 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
 	/*
-	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
-	 * behavior with RAM pages by returning success.
+	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
+	 * track of number of mappings of RAM pages. We can assert that
+	 * the type requested matches the type of first page in the range.
 	 */
-	if (is_ram != 0)
+	if (is_ram) {
+		if (!pat_enabled)
+			return 0;
+
+		flags = lookup_memtype(paddr);
+		if (want_flags != flags) {
+			printk(KERN_WARNING
+			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+					      (~_PAGE_CACHE_MASK)) |
+					     flags);
+		}
 		return 0;
+	}
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -678,14 +868,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
@@ -713,23 +895,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
+	unsigned long flags;
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
+	if (!pat_enabled)
+		return 0;
+
+	/* for vm_insert_pfn and friends, we set prot based on lookup */
+	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
 	return 0;
 }
 
@@ -744,14 +927,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
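
To make the driver-facing interface added in this file concrete, here is a hedged sketch of a caller of io_reserve_memtype()/io_free_memtype(); the helper and its resource values are hypothetical, and iomap_create_wc() in iomap_32.c above is the real in-tree user of this pair.

/* Hypothetical driver snippet: ask PAT for WC on an MMIO window. */
static int my_reserve_window_wc(resource_size_t start, resource_size_t len)
{
	unsigned long type = _PAGE_CACHE_WC;
	int ret;

	/*
	 * On success, 'type' holds the requested type or a compatible one
	 * (for example _PAGE_CACHE_UC_MINUS when WC is not available).
	 */
	ret = io_reserve_memtype(start, start + len, &type);
	if (ret)
		return ret;

	/* ... build a pgprot from 'type' and map the window ... */

	io_free_memtype(start, start + len);	/* on teardown */
	return 0;
}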

+ 1 - 1
arch/x86/power/cpu.c

@@ -242,7 +242,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	fix_processor_context();
 
 	do_fpu_end();
-	mtrr_ap_init();
+	mtrr_bp_restore();
 
 #ifdef CONFIG_X86_OLD_MCE
 	mcheck_init(&boot_cpu_data);

+ 12 - 5
include/linux/io-mapping.h

@@ -49,23 +49,30 @@ static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
 	struct io_mapping *iomap;
-
-	if (!is_io_mapping_possible(base, size))
-		return NULL;
+	pgprot_t prot;
 
 	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
 	if (!iomap)
-		return NULL;
+		goto out_err;
+
+	if (iomap_create_wc(base, size, &prot))
+		goto out_free;
 
 	iomap->base = base;
 	iomap->size = size;
-	iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL));
+	iomap->prot = prot;
 	return iomap;
+
+out_free:
+	kfree(iomap);
+out_err:
+	return NULL;
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
+	iomap_free(mapping->base, mapping->size);
 	kfree(mapping);
 }
 
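For context, a rough sketch of the usual create/free pairing from a driver's point of view; the structure and values are illustrative assumptions, and the per-page io_mapping_map_*()/io_mapping_unmap_*() accessors are left out.

/* Hypothetical device state holding a WC io_mapping of some aperture. */
struct my_dev {
	struct io_mapping *aperture;
};

static int my_dev_setup(struct my_dev *dev, resource_size_t base,
			unsigned long size)
{
	/* With this change the PAT reservation is taken via iomap_create_wc(). */
	dev->aperture = io_mapping_create_wc(base, size);
	if (!dev->aperture)
		return -ENOMEM;
	return 0;
}

static void my_dev_teardown(struct my_dev *dev)
{
	/* io_mapping_free() now drops the PAT reservation via iomap_free(). */
	io_mapping_free(dev->aperture);
}
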

+ 2 - 2
include/linux/page-flags.h

@@ -99,7 +99,7 @@ enum pageflags {
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
 	PG_uncached,		/* Page has been mapped as uncached */
 #endif
 	__NR_PAGEFLAGS,
@@ -257,7 +257,7 @@ PAGEFLAG_FALSE(Mlocked)
 	SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
 PAGEFLAG(Uncached, uncached)
 #else
 PAGEFLAG_FALSE(Uncached)

+ 14 - 0
kernel/cpu.c

@@ -414,6 +414,14 @@ int disable_nonboot_cpus(void)
 	return error;
 }
 
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
 void __ref enable_nonboot_cpus(void)
 {
 	int cpu, error;
@@ -425,6 +433,9 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
+
+	arch_enable_nonboot_cpus_begin();
+
 	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
@@ -433,6 +444,9 @@ void __ref enable_nonboot_cpus(void)
 		}
 		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
 	}
+
+	arch_enable_nonboot_cpus_end();
+
 	cpumask_clear(frozen_cpus);
 out:
 	cpu_maps_update_done();

+ 34 - 6
kernel/smp.c

@@ -176,6 +176,11 @@ void generic_smp_call_function_interrupt(void)
 	struct call_function_data *data;
 	int cpu = get_cpu();
 
+	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(cpu));
+
 	/*
 	 * Ensure entry is visible on call_function_queue after we have
 	 * entered the IPI. See comment in smp_call_function_many.
@@ -230,6 +235,11 @@ void generic_smp_call_function_single_interrupt(void)
 	unsigned int data_flags;
 	LIST_HEAD(list);
 
+	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
 	spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
 	spin_unlock(&q->lock);
@@ -285,8 +295,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	 */
 	this_cpu = get_cpu();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
@@ -329,8 +345,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
 	csd_lock(data);
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+		     && !oops_in_progress);
 
 	generic_exec_single(cpu, data, wait);
 }
@@ -365,8 +387,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	unsigned long flags;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);

+ 1 - 1
mm/Kconfig

@@ -153,7 +153,7 @@ config MEMORY_HOTREMOVE
 #
 config PAGEFLAGS_EXTENDED
 	def_bool y
-	depends on 64BIT || SPARSEMEM_VMEMMAP || !NUMA || !SPARSEMEM
+	depends on 64BIT || SPARSEMEM_VMEMMAP || !SPARSEMEM
 
 # Heavily threaded applications may benefit from splitting the mm-wide
 # page_table_lock, so that faults on different parts of the user address