
Merge branch 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6

* 'stable/swiotlb-0.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
  swiotlb: Use page alignment for early buffer allocation
  swiotlb: make io_tlb_overflow static
Linus Torvalds, 14 years ago
commit a9ccd80aad
1 file changed, 9 insertions(+), 9 deletions(-)

lib/swiotlb.c  (+9, -9)

@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
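
Note on the hunk above: adding `static` at file scope gives io_tlb_overflow_buffer internal linkage, so the symbol is no longer exported to other translation units. A minimal stand-alone illustration (hypothetical file, not kernel code):

/* demo.c -- hypothetical stand-alone example, not kernel code */
#include <stdio.h>

/*
 * File-scope 'static' gives the pointer internal linkage: the symbol is not
 * exported, so an 'extern void *overflow_buffer;' declaration in another
 * translation unit would no longer link against it.
 */
static void *overflow_buffer;

int main(void)
{
	printf("overflow_buffer starts out as %p\n", overflow_buffer);
	return 0;
}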
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
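
For context on the pattern in this hunk: the early allocations now request whole pages, so each byte count is rounded up with PAGE_ALIGN() before calling the page-granular bootmem allocators. Below is a hedged user-space sketch of the same idea, using a local stand-in for PAGE_ALIGN (4 KiB pages assumed) and C11 aligned_alloc(), which likewise expects the size to be a multiple of the alignment; it is an analogue, not kernel code.

/* page_align_demo.c -- hypothetical user-space sketch, not kernel code */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local stand-in for the kernel's PAGE_ALIGN macro, assuming 4 KiB pages. */
#define DEMO_PAGE_SIZE   4096UL
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

/*
 * Analogue of "allocate a zeroed, page-aligned, page-sized buffer": the size
 * is rounded up first, mirroring alloc_bootmem_pages(PAGE_ALIGN(...)) in the
 * hunk above. C11 aligned_alloc() expects the size to be a multiple of the
 * alignment, which the rounding guarantees.
 */
static void *demo_alloc_pages(size_t bytes, size_t *reserved)
{
	size_t aligned = DEMO_PAGE_ALIGN(bytes);
	void *p = aligned_alloc(DEMO_PAGE_SIZE, aligned);

	if (p) {
		memset(p, 0, aligned);
		*reserved = aligned;	/* remember the real size for the free path */
	}
	return p;
}

int main(void)
{
	size_t reserved = 0;
	/* 1000 ints = 4000 bytes, rounded up to one 4096-byte page. */
	void *list = demo_alloc_pages(1000 * sizeof(int), &reserved);

	if (!list)
		return 1;
	printf("requested %zu bytes, reserved %zu bytes\n",
	       1000 * sizeof(int), reserved);
	free(list);
	return 0;
}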
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
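
A small sanity-check sketch (illustrative only, with a local 4 KiB stand-in for PAGE_ALIGN): because the rounding is deterministic and idempotent, applying PAGE_ALIGN() to the same size expression on the allocation path above and on this free path yields identical byte counts, so each free_bootmem_late() range matches the region that was reserved.

/* align_symmetry_demo.c -- hypothetical sketch, not kernel code */
#include <assert.h>
#include <stdio.h>

/* Local stand-in for PAGE_ALIGN, assuming 4 KiB pages. */
#define DEMO_PAGE_SIZE   4096UL
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical sizes in the spirit of the diff: a bookkeeping array,
	 * the 32 KiB overflow buffer, and an arbitrary odd byte count. */
	unsigned long sizes[] = { 1000UL * sizeof(int), 32UL * 1024, 123UL };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long alloc_size = DEMO_PAGE_ALIGN(sizes[i]);
		unsigned long free_size  = DEMO_PAGE_ALIGN(sizes[i]);

		/* Deterministic and idempotent: the free path computes the
		 * same byte count the allocation reserved. */
		assert(alloc_size == free_size);
		assert(DEMO_PAGE_ALIGN(alloc_size) == alloc_size);
		printf("%lu -> %lu bytes\n", sizes[i], alloc_size);
	}
	return 0;
}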