
Merge branch 'for-v3.8' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull CMA and DMA-mapping update from Marek Szyprowski:
 "Another set of Contiguous Memory Allocator and DMA-mapping framework
  updates for v3.8.

  This pull request consists of only two patches.  The first fixes a
  long-standing issue with dmapools (the code predates current git
  history), which forced all allocations to use the GFP_ATOMIC flag,
  ignoring the flags passed by the caller.  The second patch changes the
  CMA code to correctly use the phys_addr_t type, which enables support
  for LPAE systems."

* 'for-v3.8' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  drivers: cma: represent physical addresses as phys_addr_t
  mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls
Linus Torvalds, 12 years ago
commit c45564e916
 drivers/base/dma-contiguous.c  | 24 ++++++++++--------------
 include/linux/dma-contiguous.h |  4 ++--
 mm/dmapool.c                   | 31 +++++++------------------------
 3 files changed, 19 insertions(+), 40 deletions(-)

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c

@@ -57,8 +57,8 @@ struct cma *dma_contiguous_default_area;
  * Users, who want to set the size of global CMA area for their system
  * should use cma= kernel parameter.
  */
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
 
 static int __init early_cma(char *p)
 {
@@ -70,7 +70,7 @@ early_param("cma", early_cma);
 
 #ifdef CONFIG_CMA_SIZE_PERCENTAGE
 
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
 {
 	struct memblock_region *reg;
 	unsigned long total_pages = 0;
@@ -88,7 +88,7 @@ static unsigned long __init __maybe_unused cma_early_percent_memory(void)
 
 #else
 
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 {
 	return 0;
 }
@@ -106,7 +106,7 @@ static inline __maybe_unused unsigned long cma_early_percent_memory(void)
  */
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
-	unsigned long selected_size = 0;
+	phys_addr_t selected_size = 0;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
@@ -126,7 +126,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 	if (selected_size) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-			 selected_size / SZ_1M);
+			 (unsigned long)selected_size / SZ_1M);
 
 		dma_declare_contiguous(NULL, selected_size, 0, limit);
 	}
@@ -227,11 +227,11 @@ core_initcall(cma_init_reserved_areas);
  * called by board specific code when early allocator (memblock or bootmem)
  * is still activate.
  */
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 				  phys_addr_t base, phys_addr_t limit)
 {
 	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
-	unsigned long alignment;
+	phys_addr_t alignment;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
@@ -268,10 +268,6 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 		if (!addr) {
 			base = -ENOMEM;
 			goto err;
-		} else if (addr + size > ~(unsigned long)0) {
-			memblock_free(addr, size);
-			base = -EINVAL;
-			goto err;
 		} else {
 			base = addr;
 		}
@@ -285,14 +281,14 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 	r->size = size;
 	r->dev = dev;
 	cma_reserved_count++;
-	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(base, size);
 	return 0;
 err:
-	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return base;
 }
 

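The (unsigned long) casts in the pr_debug()/pr_info() calls above keep the
existing %ld/%08lx format strings valid now that the values are phys_addr_t,
at the cost of truncating what gets printed on LPAE. Later kernels grew a
dedicated %pa format specifier for phys_addr_t; a small sketch of that
spelling (the helper function here is hypothetical):

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Hypothetical helper: %pa takes a *pointer* to a phys_addr_t and prints
 * it at its native width, so no truncating cast is needed for the base. */
static void cma_report_reservation(phys_addr_t base, phys_addr_t size)
{
	pr_info("CMA: reserved %lu MiB at %pa\n",
		(unsigned long)(size / SZ_1M), &base);
}
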
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h

@@ -68,7 +68,7 @@ struct device;
 extern struct cma *dma_contiguous_default_area;
 
 void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 			   phys_addr_t base, phys_addr_t limit);
 
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -83,7 +83,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
 static inline
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 			   phys_addr_t base, phys_addr_t limit)
 {
 	return -ENOSYS;

diff --git a/mm/dmapool.c b/mm/dmapool.c
--- a/mm/dmapool.c
+++ b/mm/dmapool.c

@@ -50,7 +50,6 @@ struct dma_pool {		/* the pool */
 	size_t allocation;
 	size_t boundary;
 	char name[32];
-	wait_queue_head_t waitq;
 	struct list_head pools;
 };
 
@@ -62,8 +61,6 @@ struct dma_page {		/* cacheable header for 'allocation' bytes */
 	unsigned int offset;
 };
 
-#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
-
 static DEFINE_MUTEX(pools_lock);
 
 static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	retval->size = size;
 	retval->boundary = boundary;
 	retval->allocation = allocation;
-	init_waitqueue_head(&retval->waitq);
 
 	if (dev) {
 		int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
 		pool_initialise_page(pool, page);
-		list_add(&page->page_list, &pool->page_list);
 		page->in_use = 0;
 		page->offset = 0;
 	} else {
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	might_sleep_if(mem_flags & __GFP_WAIT);
 
 	spin_lock_irqsave(&pool->lock, flags);
- restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (page->offset < pool->allocation)
 			goto ready;
 	}
-	page = pool_alloc_page(pool, GFP_ATOMIC);
-	if (!page) {
-		if (mem_flags & __GFP_WAIT) {
-			DECLARE_WAITQUEUE(wait, current);
 
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			__add_wait_queue(&pool->waitq, &wait);
-			spin_unlock_irqrestore(&pool->lock, flags);
+	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+	spin_unlock_irqrestore(&pool->lock, flags);
 
-			schedule_timeout(POOL_TIMEOUT_JIFFIES);
+	page = pool_alloc_page(pool, mem_flags);
+	if (!page)
+		return NULL;
 
-			spin_lock_irqsave(&pool->lock, flags);
-			__remove_wait_queue(&pool->waitq, &wait);
-			goto restart;
-		}
-		retval = NULL;
-		goto done;
-	}
+	spin_lock_irqsave(&pool->lock, flags);
 
+	list_add(&page->page_list, &pool->page_list);
  ready:
 	page->in_use++;
 	offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #ifdef	DMAPOOL_DEBUG
 	memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
- done:
 	spin_unlock_irqrestore(&pool->lock, flags);
 	return retval;
 }
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page->in_use--;
 	*(int *)vaddr = page->offset;
 	page->offset = offset;
-	if (waitqueue_active(&pool->waitq))
-		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
 	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
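
With the waitqueue retry loop removed, a caller's gfp flags now reach
dma_alloc_coherent() unchanged: GFP_KERNEL callers can simply sleep in the
allocator, while GFP_ATOMIC callers fail fast instead of looping. A minimal,
hypothetical caller sketch (pool name, element size, and alignment are made
up for illustration):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Create a pool of 64-byte, 8-byte-aligned DMA descriptors.
 * Arguments: name, device, element size, alignment, boundary (0 = none). */
static struct dma_pool *my_drv_create_pool(struct device *dev)
{
	return dma_pool_create("my_drv_desc", dev, 64, 8, 0);
}

/* After this patch, the GFP_KERNEL below is passed through to
 * dma_alloc_coherent() when a new backing page must be allocated, rather
 * than being silently replaced with GFP_ATOMIC plus a timed retry. */
static void *my_drv_alloc_desc(struct dma_pool *pool, dma_addr_t *dma)
{
	/* May sleep; call only from process context. */
	return dma_pool_alloc(pool, GFP_KERNEL, dma);
}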