
x86: clean up arch/x86/mm/mmap_32/64.c

White space and coding style cleanup.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Thomas Gleixner 17 years ago
parent commit f8eeae6821
2 changed files with 9 additions and 6 deletions
  1. arch/x86/mm/mmap_32.c (+2 -2)
  2. arch/x86/mm/mmap_64.c (+7 -4)

+ 2 - 2
arch/x86/mm/mmap_32.c

@@ -64,8 +64,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (sysctl_legacy_va_layout ||
-			(current->personality & ADDR_COMPAT_LAYOUT) ||
-			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+	    (current->personality & ADDR_COMPAT_LAYOUT) ||
+	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;

+ 7 - 4
arch/x86/mm/mmap_64.c

@@ -16,11 +16,14 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 #endif
 	mm->mmap_base = TASK_UNMAPPED_BASE;
 	if (current->flags & PF_RANDOMIZE) {
-		/* Add 28bit randomness which is about 40bits of address space
-		   because mmap base has to be page aligned.
- 		   or ~1/128 of the total user VM
-	   	   (total user address space is 47bits) */
+		/*
+		 * Add 28bit randomness which is about 40bits of
+		 * address space because mmap base has to be page
+		 * aligned.  or ~1/128 of the total user VM (total
+		 * user address space is 47bits)
+		 */
 		unsigned rnd = get_random_int() & 0xfffffff;
+
 		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
 	}
 	mm->get_unmapped_area = arch_get_unmapped_area;
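For reference, a minimal user-space sketch (not part of this commit) working through the arithmetic in the reflowed comment above. It assumes PAGE_SHIFT is 12 (4 KiB pages, as on x86): 28 bits of randomness shifted left by 12 spans 2^40 bytes, which is 2^40 / 2^47 = 1/128 of a 47-bit user address space.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, as on x86 */

int main(void)
{
	unsigned rnd_bits  = 28;		/* rnd = get_random_int() & 0xfffffff */
	unsigned span_bits = rnd_bits + PAGE_SHIFT;	/* page alignment: 28 + 12 = 40 */
	unsigned vm_bits   = 47;		/* x86-64 user address space */

	/* 2^40 bytes of possible mmap_base offsets... */
	printf("randomized span: 2^%u bytes\n", span_bits);

	/* ...covering 2^40 / 2^47 = 1/128 of the total user VM */
	printf("fraction of user VM: 1/%lu\n", 1UL << (vm_bits - span_bits));

	return 0;
}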