
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull two ARM fixes from Russell King:
 "It's been fairly quiet with the fixes.  Just two this time.  One fixes
  a long standing problem with KALLSYMS needing an additional pass, and
  the other sorts a problem with the vmalloc space interacting with
  static IO mappings."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7438/1: fill possible PMD empty section gaps
  ARM: 7428/1: Prevent KALLSYM size mismatch on ARM.
Linus Torvalds, 13 years ago
commit ca24a14557
2 changed files with 76 additions and 0 deletions
  1. arch/arm/kernel/vmlinux.lds.S (+2, -0)
  2. arch/arm/mm/mmu.c (+74, -0)

+ 2 - 0
arch/arm/kernel/vmlinux.lds.S

@@ -183,7 +183,9 @@ SECTIONS
 	}
 #endif

+#ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif

 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */

+ 74 - 0
arch/arm/mm/mmu.c

@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }

+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();

 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
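To make the boundary arithmetic in fill_pmd_gaps() concrete, below is a minimal userspace sketch of the odd-section check it performs. It assumes the classic ARM 2-level layout (1MB hardware sections, a Linux PMD covering 2MB); the constants, the helper name and the example address are illustrative only and are not taken from the kernel headers or from this patch.

/*
 * Standalone sketch of the check used by fill_pmd_gaps() above.
 * SECTION_SIZE/PMD_SIZE/PMD_MASK are defined locally for illustration;
 * in the kernel they come from the ARM page table headers.
 */
#include <stdio.h>

#define SECTION_SIZE	0x00100000UL		/* 1MB hardware section */
#define PMD_SIZE	(2 * SECTION_SIZE)	/* one Linux PMD covers 2MB */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* True when addr sits on the odd (second) 1MB half of its 2MB PMD. */
static int on_odd_section_boundary(unsigned long addr)
{
	return (addr & ~PMD_MASK) == SECTION_SIZE;
}

int main(void)
{
	/* A hypothetical 1MB static mapping starting at 0xf8100000. */
	unsigned long start = 0xf8100000UL;
	unsigned long end   = start + SECTION_SIZE;

	/*
	 * start lies on an odd 1MB boundary, so the first half of the
	 * PMD at 0xf8000000 is left untouched; the patch reserves that
	 * hole with pmd_empty_section_gap(start & PMD_MASK).
	 */
	printf("start odd: %d (gap would be reserved at %#lx)\n",
	       on_odd_section_boundary(start), start & PMD_MASK);

	/* end falls on an even 2MB boundary here, so no trailing gap. */
	printf("end   odd: %d\n", on_odd_section_boundary(end));
	return 0;
}

In the kernel itself the reservation is done by the pmd_empty_section_gap() helper added in the hunk above, which inserts a dummy vm_struct over the unused 1MB so that later ioremap() or vmalloc() calls cannot land in the half-initialized PMD.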