Ver código fonte

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, k8 nb: Fix boot crash: enable k8_northbridges unconditionally on AMD systems
  x86, UV: Fix target_cpus() in x2apic_uv_x.c
  x86: Reduce per cpu warning boot up messages
  x86: Reduce per cpu MCA boot up messages
  x86_64, cpa: Don't work hard in preserving kernel 2M mappings when using 4K already
Linus Torvalds 15 anos atrás
pai
commit
15c989d4d1

+ 2 - 2
arch/x86/Kconfig

@@ -662,7 +662,7 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	depends on X86_64 && PCI
+	depends on X86_64 && PCI && K8_NB
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
 	  on systems with more than 3GB. This is usually needed for USB,
@@ -2061,7 +2061,7 @@ endif # X86_32
 
 config K8_NB
 	def_bool y
-	depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+	depends on CPU_SUP_AMD && PCI
 
 source "drivers/pcmcia/Kconfig"
 

+ 1 - 3
arch/x86/kernel/apic/x2apic_uv_x.c

@@ -120,11 +120,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
 static const struct cpumask *uv_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)

+ 2 - 2
arch/x86/kernel/cpu/mcheck/mce_intel.c

@@ -95,7 +95,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Already owned by someone else? */
 		if (val & CMCI_EN) {
-			if (test_and_clear_bit(i, owned) || boot)
+			if (test_and_clear_bit(i, owned) && !boot)
 				print_update("SHD", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 			continue;
@@ -107,7 +107,7 @@ static void cmci_discover(int banks, int boot)
 
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & CMCI_EN) {
-			if (!test_and_set_bit(i, owned) || boot)
+			if (!test_and_set_bit(i, owned) && !boot)
 				print_update("CMCI", &hdr, i);
 			__clear_bit(i, __get_cpu_var(mce_poll_banks));
 		} else {

+ 14 - 0
arch/x86/kernel/k8.c

@@ -121,3 +121,17 @@ void k8_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(k8_flush_garts);
 
+static __init int init_k8_nbs(void)
+{
+	int err = 0;
+
+	err = cache_k8_northbridges();
+
+	if (err < 0)
+		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+	return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);

+ 1 - 1
arch/x86/kernel/pci-gart_64.c

@@ -735,7 +735,7 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+	if (num_k8_northbridges == 0)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64

+ 1 - 1
arch/x86/kernel/process.c

@@ -607,7 +607,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
 			" performance may degrade.\n");
 	}
 #endif

+ 23 - 2
arch/x86/mm/pageattr.c

@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	 */
 	if (kernel_set_to_readonly &&
 	    within(address, (unsigned long)_text,
-		   (unsigned long)__end_rodata_hpage_align))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   (unsigned long)__end_rodata_hpage_align)) {
+		unsigned int level;
+
+		/*
+		 * Don't enforce the !RW mapping for the kernel text mapping,
+		 * if the current mapping is already using small page mapping.
+		 * No need to work hard to preserve large page mappings in this
+		 * case.
+		 *
+		 * This also fixes the Linux Xen paravirt guest boot failure
+		 * (because of unexpected read-only mappings for kernel identity
+		 * mappings). In this paravirt guest case, the kernel text
+		 * mapping and the kernel identity mapping share the same
+		 * page-table pages. Thus we can't really use different
+		 * protections for the kernel text and identity mappings. Also,
+		 * these shared mappings are made of small page mappings.
+		 * Thus this "don't enforce !RW mapping for small page kernel
+		 * text mapping" logic will help Linux Xen paravirt guests boot
+		 * as well.
+		 */
+		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

+ 1 - 1
drivers/char/agp/Kconfig

@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
 	tristate "AMD Opteron/Athlon64 on-CPU GART support"
-	depends on AGP && X86
+	depends on AGP && X86 && K8_NB
 	help
 	  This option gives you AGP support for the GLX component of
 	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.