
Merge branch 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA-mapping fixes from Marek Szyprowski:
 "A set of minor fixes for dma-mapping code (ARM and x86) required for
  Contiguous Memory Allocator (CMA) patches merged in v3.5-rc1."

* 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  x86: dma-mapping: fix broken allocation when dma_mask has been provided
  ARM: dma-mapping: fix debug messages in dmabounce code
  ARM: mm: fix type of the arm_dma_limit global variable
  ARM: dma-mapping: Add missing static storage class specifier
Linus Torvalds, 13 years ago
commit 56b880e2e3
5 changed files with 14 additions and 13 deletions
  1. arch/arm/common/dmabounce.c (+8 -8)
  2. arch/arm/mm/dma-mapping.c (+2 -2)
  3. arch/arm/mm/init.c (+1 -1)
  4. arch/arm/mm/mm.h (+1 -1)
  5. arch/x86/kernel/pci-dma.c (+2 -1)

+ 8 - 8
arch/arm/common/dmabounce.c

@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	struct safe_buffer *buf;
 	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	struct safe_buffer *buf;
 	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
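
Note on the dmabounce hunks above: the old entry dev_dbg() printed `off` before the function had computed it, so the debug output showed an uninitialized value, while the later message never reported the offset at all. The fix drops `off` from the entry message and prints it once it is valid. A minimal sketch of the resulting flow, assuming the surrounding kernel context (the `off` computation and the early return are paraphrased from dmabounce.c, not shown in this diff):

	struct safe_buffer *buf;
	unsigned long off;

	/* 'off' is not yet known here, so it is no longer printed */
	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;	/* now 'off' is valid */

	BUG_ON(buf->direction != dir);

	/* ...and the second message can safely include it */
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);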

+ 2 - 2
arch/arm/mm/dma-mapping.c

@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
 	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
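
Note on the two dma-mapping.c hunks: `consistent_base` and `coherent_pool_size` are referenced only within this file, so giving them internal linkage with `static` keeps them out of the kernel's global symbol namespace and silences sparse's "symbol ... was not declared. Should it be static?" warning. The general pattern, with hypothetical names for illustration only:

	/* foo.c -- hypothetical example file */
	unsigned long pool_base;	/* external linkage: visible kernel-wide,
					   can clash with another 'pool_base' */

	static unsigned long pool_size;	/* internal linkage: private to foo.c,
					   which is all these variables need */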

+ 1 - 1
arch/arm/mm/init.c

@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)

+ 1 - 1
arch/arm/mm/mm.h

@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
 #define arm_dma_limit ((u32)~0)
 #endif
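
Note on the init.c and mm.h hunks: together they retype `arm_dma_limit` from `u32` to `phys_addr_t`. With LPAE enabled, ARM physical addresses can exceed 32 bits (`phys_addr_t` widens to 64 bits under `CONFIG_ARCH_PHYS_ADDR_T_64BIT`), so keeping a physical-address limit in a `u32` silently truncates it and breaks comparisons against physical addresses. A self-contained userspace sketch of the truncation hazard, using hypothetical values not taken from the patch:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t phys_addr_t;	/* what the kernel uses when LPAE is on */

	int main(void)
	{
		phys_addr_t limit = 0x100000000ULL;	/* hypothetical 4 GiB DMA limit */
		uint32_t truncated = (uint32_t)limit;	/* the old u32 storage */

		/* truncated wraps to 0, so "addr <= limit" checks misbehave */
		printf("full=%#llx truncated=%#x\n",
		       (unsigned long long)limit, (unsigned)truncated);
		return 0;
	}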

+ 2 - 1
arch/x86/kernel/pci-dma.c

@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
-	struct page *page = NULL;
+	struct page *page;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 	flag |= __GFP_ZERO;
 again:
+	page = NULL;
 	if (!(flag & GFP_ATOMIC))
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
 	if (!page)
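
Note on the pci-dma.c hunks: `dma_generic_alloc_coherent()` can jump back to the `again:` label after freeing a page whose physical address did not fit the caller's DMA mask. Because `page` used to be initialized only at its declaration, a retry with `GFP_ATOMIC` set (which skips the CMA allocator) left `page` pointing at the just-freed memory, so the `if (!page)` fallback never ran. Resetting `page = NULL` at the label restores the fallback on every pass. A sketch of the fixed retry loop, reconstructed from the diff context (the retry condition is abbreviated and should be read as an approximation, not the exact kernel code):

	again:
		page = NULL;	/* forget any pointer freed on a previous pass */
		if (!(flag & GFP_ATOMIC))
			page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (!page)
			page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
		if (!page)
			return NULL;

		addr = page_to_phys(page);
		if (addr + size > dma_mask) {
			__free_pages(page, get_order(size));
			/* retry with more restrictive GFP flags (condition abbreviated);
			   without the reset above, 'page' would dangle here */
			goto again;
		}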