Merge branch 'dma' of http://git.linaro.org/git/people/nico/linux into devel-stable

Russell King 14 years ago
parent
commit
07f1c295de
55 changed files with 304 additions and 288 deletions
  1. arch/arm/common/dmabounce.c  +83 -110
  2. arch/arm/common/it8152.c  +7 -9
  3. arch/arm/common/sa1111.c  +31 -29
  4. arch/arm/include/asm/dma-mapping.h  +13 -75
  5. arch/arm/include/asm/dma.h  +6 -5
  6. arch/arm/include/asm/mach/arch.h  +4 -0
  7. arch/arm/include/asm/memory.h  +0 -12
  8. arch/arm/kernel/setup.c  +6 -0
  9. arch/arm/mach-davinci/board-da830-evm.c  +1 -0
  10. arch/arm/mach-davinci/board-da850-evm.c  +1 -0
  11. arch/arm/mach-davinci/board-dm355-evm.c  +1 -0
  12. arch/arm/mach-davinci/board-dm355-leopard.c  +1 -0
  13. arch/arm/mach-davinci/board-dm365-evm.c  +1 -0
  14. arch/arm/mach-davinci/board-dm644x-evm.c  +1 -0
  15. arch/arm/mach-davinci/board-dm646x-evm.c  +2 -0
  16. arch/arm/mach-davinci/board-mityomapl138.c  +1 -0
  17. arch/arm/mach-davinci/board-neuros-osd2.c  +1 -0
  18. arch/arm/mach-davinci/board-omapl138-hawk.c  +1 -0
  19. arch/arm/mach-davinci/board-sffsdr.c  +1 -0
  20. arch/arm/mach-davinci/board-tnetv107x-evm.c  +1 -0
  21. arch/arm/mach-davinci/include/mach/memory.h  +0 -7
  22. arch/arm/mach-h720x/h7201-eval.c  +1 -0
  23. arch/arm/mach-h720x/h7202-eval.c  +1 -0
  24. arch/arm/mach-h720x/include/mach/memory.h  +0 -7
  25. arch/arm/mach-ixp4xx/avila-setup.c  +6 -0
  26. arch/arm/mach-ixp4xx/common-pci.c  +6 -6
  27. arch/arm/mach-ixp4xx/coyote-setup.c  +3 -0
  28. arch/arm/mach-ixp4xx/dsmg600-setup.c  +3 -0
  29. arch/arm/mach-ixp4xx/fsg-setup.c  +3 -0
  30. arch/arm/mach-ixp4xx/gateway7001-setup.c  +3 -0
  31. arch/arm/mach-ixp4xx/goramo_mlr.c  +3 -0
  32. arch/arm/mach-ixp4xx/gtwx5715-setup.c  +3 -0
  33. arch/arm/mach-ixp4xx/include/mach/memory.h  +0 -4
  34. arch/arm/mach-ixp4xx/ixdp425-setup.c  +12 -0
  35. arch/arm/mach-ixp4xx/nas100d-setup.c  +3 -0
  36. arch/arm/mach-ixp4xx/nslu2-setup.c  +3 -0
  37. arch/arm/mach-ixp4xx/vulcan-setup.c  +3 -0
  38. arch/arm/mach-ixp4xx/wg302v2-setup.c  +3 -0
  39. arch/arm/mach-pxa/cm-x2xx.c  +3 -0
  40. arch/arm/mach-pxa/include/mach/memory.h  +0 -4
  41. arch/arm/mach-realview/include/mach/memory.h  +0 -4
  42. arch/arm/mach-realview/realview_eb.c  +3 -0
  43. arch/arm/mach-realview/realview_pb1176.c  +3 -0
  44. arch/arm/mach-realview/realview_pb11mp.c  +3 -0
  45. arch/arm/mach-realview/realview_pba8.c  +3 -0
  46. arch/arm/mach-realview/realview_pbx.c  +3 -0
  47. arch/arm/mach-sa1100/assabet.c  +3 -0
  48. arch/arm/mach-sa1100/badge4.c  +3 -0
  49. arch/arm/mach-sa1100/include/mach/memory.h  +0 -4
  50. arch/arm/mach-sa1100/jornada720.c  +3 -0
  51. arch/arm/mach-shark/core.c  +1 -0
  52. arch/arm/mach-shark/include/mach/memory.h  +0 -2
  53. arch/arm/mm/dma-mapping.c  +32 -3
  54. arch/arm/mm/init.c  +19 -7
  55. arch/arm/mm/mm.h  +6 -0

+ 83 - 110
arch/arm/common/dmabounce.c

@@ -79,6 +79,8 @@ struct dmabounce_device_info {
 	struct dmabounce_pool	large;
 
 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	if (!dev || !dev->archdata.dmabounce)
 		return NULL;
 	if (dma_mapping_error(dev, dma_addr)) {
-		if (dev)
-			dev_err(dev, "Trying to %s invalid mapping\n", where);
-		else
-			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		dev_err(dev, "Trying to %s invalid mapping\n", where);
 		return NULL;
 	}
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	dma_addr = virt_to_dma(dev, ptr);
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;
 
 	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
+		unsigned long limit, mask = *dev->dma_mask;
 
 		limit = (mask + 1) & ~mask;
 		if (limit && size > limit) {
 			dev_err(dev, "DMA mapping too big (requested %#x "
 				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
+			return -E2BIG;
 		}
 
-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
 	}
 
-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-		struct safe_buffer *buf;
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
 
-		buf = alloc_safe_buffer(device_info, ptr, size, dir);
-		if (buf == 0) {
-			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-			       __func__, ptr);
-			return ~0;
-		}
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	struct safe_buffer *buf;
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );
 
-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
-			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-				__func__, ptr, buf->safe, size);
-			memcpy(buf->safe, ptr, size);
-		}
-		ptr = buf->safe;
+	buf = alloc_safe_buffer(device_info, ptr, size, dir);
+	if (buf == NULL) {
+		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+		       __func__, ptr);
+		return ~0;
+	}
 
-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
+
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+			__func__, ptr, buf->safe, size);
+		memcpy(buf->safe, ptr, size);
 	}
 
-	return dma_addr;
+	return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+	BUG_ON(buf->size != size);
+	BUG_ON(buf->direction != dir);
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		DO_STATS(dev->archdata.dmabounce->bounce_count++);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			void *ptr = buf->ptr;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		void *ptr = buf->ptr;
 
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, ptr, size);
-			memcpy(ptr, buf->safe, size);
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe, ptr, size);
+		memcpy(ptr, buf->safe, size);
 
-			/*
-			 * Since we may have written to a page cache page,
-			 * we need to ensure that the data will be coherent
-			 * with user mappings.
-			 */
-			__cpuc_flush_dcache_area(ptr, size);
-		}
-		free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+		/*
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
+		 */
+		__cpuc_flush_dcache_area(ptr, size);
 	}
+	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, ptr, size, dir);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer.  (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
-
-	unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(!valid_dma_direction(dir));
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}
 
 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			     "is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}
 
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+		__func__, dma_addr, size, dir);
+
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}
 
-	unmap_single(dev, dma_addr, size, dir);
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
 
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
 	device_info->total_allocs = 0;
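
The hunks above replace the single global dma_needs_bounce() hook with a per-device callback passed to dmabounce_register_dev(). A minimal sketch of the new calling convention, assuming hypothetical platform code (my_needs_bounce and my_platform_notify are illustrations, not part of this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

/* Hypothetical policy: bounce any buffer that ends beyond a 64MB window. */
static int my_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > SZ_64M;
}

static int my_platform_notify(struct device *dev)
{
	/* 2048-byte small pool, 4096-byte large pool, plus the bounce policy */
	return dmabounce_register_dev(dev, 2048, 4096, my_needs_bounce);
}

Since only devices that actually need bouncing register a callback now, the old "dev->bus == &pci_bus_type" guard inside the global hook becomes redundant, as the it8152 and ixp4xx hunks below show.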

+ 7 - 9
arch/arm/common/it8152.c

@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= PHYS_OFFSET + SZ_64M - 1)

+ 31 - 29
arch/arm/common/sa1111.c

@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 
 	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset). The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
 #endif
 
 static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 		dev->dev.dma_mask = &dev->dma_mask;
 
 		if (dev->dma_mask != 0xffffffffUL) {
-			ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+			ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+					sa1111_needs_bounce);
 			if (ret) {
 				dev_err(&dev->dev, "SA1111: Failed to register"
 					" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	kfree(sachip);
 }
 
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset). The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
 	unsigned int	skcr;
 	unsigned int	skpcr;

+ 13 - 75
arch/arm/include/asm/dma-mapping.h

@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;
 
+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));
 
-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size,
-			dir, addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
 	return addr;
 }
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
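
The reworked dma_map_single() above now BUG()s on addresses outside the lowmem linear map (the virt_addr_valid() checks) and then forwards to the page-based helper. A hedged usage sketch (the function and buffer names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static dma_addr_t map_buf(struct device *dev, size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL);	/* lowmem, so virt_addr_valid() holds */

	if (!buf)
		return ~0;	/* all-bits-set: the DMA error cookie used here */

	/* a vmalloc() or highmem pointer would trip the new BUG_ON checks */
	return dma_map_single(dev, buf, size, DMA_TO_DEVICE);
}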

+ 6 - 5
arch/arm/include/asm/dma.h

@@ -1,15 +1,16 @@
 #ifndef __ASM_ARM_DMA_H
 #define __ASM_ARM_DMA_H
 
-#include <asm/memory.h>
-
 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS	0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS	0xffffffffUL
 #else
-#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS	({ \
+	extern unsigned long arm_dma_zone_size; \
+	arm_dma_zone_size ? \
+		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
 
 #ifdef CONFIG_ISA_DMA_API
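
With the compile-time ARM_DMA_ZONE_SIZE gone, MAX_DMA_ADDRESS is now resolved at run time through a GNU statement expression. A rough functional equivalent, sketched under the assumption that CONFIG_ZONE_DMA is enabled (names taken from the hunk above):

#include <asm/memory.h>		/* PAGE_OFFSET */

extern unsigned long arm_dma_zone_size;	/* defined in arch/arm/mm/init.c */

/* A zone size of zero means the machine record set no DMA restriction. */
static unsigned long max_dma_address(void)
{
	return arm_dma_zone_size ? PAGE_OFFSET + arm_dma_zone_size
				 : 0xffffffffUL;
}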

+ 4 - 0
arch/arm/include/asm/mach/arch.h

@@ -23,6 +23,10 @@ struct machine_desc {
 
 	unsigned int		nr_irqs;	/* number of IRQs */
 
+#ifdef CONFIG_ZONE_DMA
+	unsigned long		dma_zone_size;	/* size of DMA-able area */
+#endif
+
 	unsigned int		video_start;	/* start of video RAM	*/
 	unsigned int		video_end;	/* end of video RAM	*/
 

+ 0 - 12
arch/arm/include/asm/memory.h

@@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
 #endif
 
-/*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA.  The default here places no restriction on DMA
- * allocations.  This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.

+ 6 - 0
arch/arm/kernel/setup.c

@@ -918,6 +918,12 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	tcm_init();
 
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif

+ 1 - 0
arch/arm/mach-davinci/board-da830-evm.c

@@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da830_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-da850-evm.c

@@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= da850_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-dm355-evm.c

@@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-dm355-leopard.c

@@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = dm355_leopard_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-dm365-evm.c

@@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine	= dm365_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 

+ 1 - 0
arch/arm/mach-davinci/board-dm644x-evm.c

@@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 2 - 0
arch/arm/mach-davinci/board-dm646x-evm.c

@@ -802,6 +802,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
 	.init_irq     = davinci_irq_init,
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 
 MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -810,5 +811,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
 	.init_irq     = davinci_irq_init,
 	.timer        = &davinci_timer,
 	.init_machine = evm_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END
 

+ 1 - 0
arch/arm/mach-davinci/board-mityomapl138.c

@@ -570,4 +570,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= mityomapl138_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-neuros-osd2.c

@@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
 	.init_irq	= davinci_irq_init,
 	.timer		= &davinci_timer,
 	.init_machine = davinci_ntosd2_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-omapl138-hawk.c

@@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= omapl138_hawk_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-sffsdr.c

@@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
 	.init_irq     = davinci_irq_init,
 	.timer	      = &davinci_timer,
 	.init_machine = davinci_sffsdr_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-davinci/board-tnetv107x-evm.c

@@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
 	.init_irq	= cp_intc_init,
 	.timer		= &davinci_timer,
 	.init_machine	= tnetv107x_evm_board_init,
+	.dma_zone_size	= SZ_128M,
 MACHINE_END

+ 0 - 7
arch/arm/mach-davinci/include/mach/memory.h

@@ -41,11 +41,4 @@
  */
 #define CONSISTENT_DMA_SIZE (14<<20)
 
-/*
- * Restrict DMA-able region to workaround silicon bug.  The bug
- * restricts buffers available for DMA to video hardware to be
- * below 128M
- */
-#define ARM_DMA_ZONE_SIZE	SZ_128M
-
 #endif /* __ASM_ARCH_MEMORY_H */

+ 1 - 0
arch/arm/mach-h720x/h7201-eval.c

@@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201")
 	.map_io		= h720x_map_io,
 	.init_irq	= h720x_init_irq,
 	.timer		= &h7201_timer,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END

+ 1 - 0
arch/arm/mach-h720x/h7202-eval.c

@@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202")
 	.init_irq	= h7202_init_irq,
 	.timer		= &h7202_timer,
 	.init_machine	= init_eval_h7202,
+	.dma_zone_size	= SZ_256M,
 MACHINE_END

+ 0 - 7
arch/arm/mach-h720x/include/mach/memory.h

@@ -8,11 +8,4 @@
 #define __ASM_ARCH_MEMORY_H
 
 #define PLAT_PHYS_OFFSET	UL(0x40000000)
-/*
- * This is the maximum DMA address that can be DMAd to.
- * There should not be more than (0xd0000000 - 0xc0000000)
- * bytes of RAM.
- */
-#define ARM_DMA_ZONE_SIZE	SZ_256M
-
 #endif

+ 6 - 0
arch/arm/mach-ixp4xx/avila-setup.c

@@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 
  /*
@@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= avila_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 

+ 6 - 6
arch/arm/mach-ixp4xx/common-pci.c

@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
 }
 
 
+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	return (dma_addr + size) >= SZ_64M;
+}
+
 /*
  * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
  */
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
 	if(dev->bus == &pci_bus_type) {
 		*dev->dma_mask =  SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
 	}
 	return 0;
 }
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
-}
-
 void __init ixp4xx_pci_preinit(void)
 {
 	unsigned long cpuid = read_cpuid_id();

+ 3 - 0
arch/arm/mach-ixp4xx/coyote-setup.c

@@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= coyote_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 

+ 3 - 0
arch/arm/mach-ixp4xx/dsmg600-setup.c

@@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
 	.init_irq	= ixp4xx_init_irq,
 	.timer          = &dsmg600_timer,
 	.init_machine	= dsmg600_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-ixp4xx/fsg-setup.c

@@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= fsg_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 

+ 3 - 0
arch/arm/mach-ixp4xx/gateway7001-setup.c

@@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gateway7001_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

+ 3 - 0
arch/arm/mach-ixp4xx/goramo_mlr.c

@@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gmlr_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-ixp4xx/gtwx5715-setup.c

@@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= gtwx5715_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 
 

+ 0 - 4
arch/arm/mach-ixp4xx/include/mach/memory.h

@@ -14,8 +14,4 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0x00000000)
 
-#ifdef CONFIG_PCI
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif

+ 12 - 0
arch/arm/mach-ixp4xx/ixdp425-setup.c

@@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
@@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
@@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif
 
@@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= ixdp425_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

+ 3 - 0
arch/arm/mach-ixp4xx/nas100d-setup.c

@@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
 	.init_irq	= ixp4xx_init_irq,
 	.timer          = &ixp4xx_timer,
 	.init_machine	= nas100d_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-ixp4xx/nslu2-setup.c

@@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
 	.init_irq	= ixp4xx_init_irq,
 	.timer          = &nslu2_timer,
 	.init_machine	= nslu2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-ixp4xx/vulcan-setup.c

@@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= vulcan_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-ixp4xx/wg302v2-setup.c

@@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
 	.timer		= &ixp4xx_timer,
 	.boot_params	= 0x0100,
 	.init_machine	= wg302v2_init,
+#if defined(CONFIG_PCI)
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END
 #endif

+ 3 - 0
arch/arm/mach-pxa/cm-x2xx.c

@@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX")
 	.init_irq	= cmx2xx_init_irq,
 	.timer		= &pxa_timer,
 	.init_machine	= cmx2xx_init,
+#ifdef CONFIG_PCI
+	.dma_zone_size	= SZ_64M,
+#endif
 MACHINE_END

+ 0 - 4
arch/arm/mach-pxa/include/mach/memory.h

@@ -17,8 +17,4 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0xa0000000)
 
-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARM_DMA_ZONE_SIZE	SZ_64M
-#endif
-
 #endif

+ 0 - 4
arch/arm/mach-realview/include/mach/memory.h

@@ -29,10 +29,6 @@
 #define PLAT_PHYS_OFFSET		UL(0x00000000)
 #endif
 
-#ifdef CONFIG_ZONE_DMA
-#define ARM_DMA_ZONE_SIZE	SZ_256M
-#endif
-
 #ifdef CONFIG_SPARSEMEM
 
 /*

+ 3 - 0
arch/arm/mach-realview/realview_eb.c

@@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_eb_timer,
 	.init_machine	= realview_eb_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-realview/realview_pb1176.c

@@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb1176_timer,
 	.init_machine	= realview_pb1176_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-realview/realview_pb11mp.c

@@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pb11mp_timer,
 	.init_machine	= realview_pb11mp_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-realview/realview_pba8.c

@@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pba8_timer,
 	.init_machine	= realview_pba8_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-realview/realview_pbx.c

@@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
 	.init_irq	= gic_init_irq,
 	.timer		= &realview_pbx_timer,
 	.init_machine	= realview_pbx_init,
+#ifdef CONFIG_ZONE_DMA
+	.dma_zone_size	= SZ_256M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-sa1100/assabet.c

@@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= assabet_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END

+ 3 - 0
arch/arm/mach-sa1100/badge4.c

@@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
 	.map_io		= badge4_map_io,
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END

+ 0 - 4
arch/arm/mach-sa1100/include/mach/memory.h

@@ -14,10 +14,6 @@
  */
 #define PLAT_PHYS_OFFSET	UL(0xc0000000)
 
-#ifdef CONFIG_SA1111
-#define ARM_DMA_ZONE_SIZE	SZ_1M
-#endif
-
 /*
  * Because of the wide memory address space between physical RAM banks on the
  * SA1100, it's much convenient to use Linux's SparseMEM support to implement

+ 3 - 0
arch/arm/mach-sa1100/jornada720.c

@@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
 	.init_irq	= sa1100_init_irq,
 	.timer		= &sa1100_timer,
 	.init_machine	= jornada720_mach_init,
+#ifdef CONFIG_SA1111
+	.dma_zone_size	= SZ_1M,
+#endif
 MACHINE_END

+ 1 - 0
arch/arm/mach-shark/core.c

@@ -156,4 +156,5 @@ MACHINE_START(SHARK, "Shark")
 	.map_io		= shark_map_io,
 	.init_irq	= shark_init_irq,
 	.timer		= &shark_timer,
+	.dma_zone_size	= SZ_4M,
 MACHINE_END

+ 0 - 2
arch/arm/mach-shark/include/mach/memory.h

@@ -17,8 +17,6 @@
  */
 #define PLAT_PHYS_OFFSET     UL(0x08000000)
 
-#define ARM_DMA_ZONE_SIZE	SZ_4M
-
 /*
  * Cache flushing area
  */

+ 32 - 3
arch/arm/mm/dma-mapping.c

@@ -25,9 +25,11 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include "mm.h"
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = ISA_DMA_THRESHOLD;
+	u64 mask = (u64)arm_dma_limit;
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		if ((~mask) & ISA_DMA_THRESHOLD) {
+		if ((~mask) & (u64)arm_dma_limit) {
 			dev_warn(dev, "coherent DMA mask %#llx is smaller "
 				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+				 mask, (u64)arm_dma_limit);
 			return 0;
 		}
 	}
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+	if (mask < (u64)arm_dma_limit)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+	*dev->dma_mask = dma_mask;
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
 static int __init dma_debug_do_init(void)
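
The now out-of-line dma_supported()/dma_set_mask() compare against the runtime arm_dma_limit instead of the old ISA_DMA_THRESHOLD constant. A hedged sketch of ordinary driver usage (the probe function and the 26-bit width are illustrative):

#include <linux/dma-mapping.h>

static int my_probe(struct device *dev)
{
	/* Fails with -EIO if DMA_BIT_MASK(26) is smaller than arm_dma_limit,
	 * i.e. if a GFP_DMA allocation could not be guaranteed to satisfy it. */
	if (dma_set_mask(dev, DMA_BIT_MASK(26)))
		return -EIO;
	return 0;
}

Note from the hunk that under CONFIG_DMABOUNCE the mask itself is left untouched; only the check against arm_dma_limit is performed.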

+ 19 - 7
arch/arm/mm/init.c

@@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_ZONE_DMA
+
+unsigned long arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
+
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA.  The default here places no restriction on DMA
+ * allocations.  This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
 {
@@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #endif
 	}
 
-#ifdef ARM_DMA_ZONE_SIZE
-#ifndef CONFIG_ZONE_DMA
-#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
-#endif
-
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arm_adjust_dma_zone(zone_size, zhole_size,
-		ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+	if (arm_dma_zone_size) {
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
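
As a worked example grounded in the hunks above: an SA1100 board in this series has PLAT_PHYS_OFFSET 0xc0000000 and dma_zone_size SZ_1M, so arm_dma_limit becomes 0xc0000000 + 0x100000 - 1 = 0xc00fffff, while a machine that leaves dma_zone_size at zero keeps the unrestricted 0xffffffff.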

+ 6 - 0
arch/arm/mm/mm.h

@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);