/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
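/*
 * Overview: a bus or platform driver registers a device with
 * dmabounce_register_dev(), supplying sizes for a small and a large
 * DMA pool.  From then on, the dma_map_*()/dma_unmap_*() entry points
 * in this file transparently allocate a "safe" buffer inside the DMA
 * window for any mapping that falls outside it, copy data in the
 * direction(s) the mapping requires, and hand the device the safe
 * buffer's DMA address instead of the original one.
 */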
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void *ptr;
        size_t size;
        int direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void *safe;
        dma_addr_t safe_dma_addr;
};
struct dmabounce_pool {
        unsigned long size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long allocs;
#endif
};
struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool small;
        struct dmabounce_pool large;

        rwlock_t lock;          /* protects the safe_buffers list */
};
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
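/*
 * With STATS enabled, reading the dmabounce_stats attribute yields six
 * space-separated counters, in order: allocations served from the small
 * pool, allocations served from the large pool, fallbacks to
 * dma_alloc_coherent() (total minus the two pool counts), total
 * allocations, total map operations, and total bounces (copies between
 * safe and unsafe buffers).
 */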
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
                __func__, ptr, size, dir);

        /*
         * Pick the smallest pool that fits; oversized requests fall
         * back to the coherent allocator.
         */
        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%zu)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);
        list_add(&buf->node, &device_info->safe_buffers);
        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}
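/*
 * Note on the GFP_ATOMIC allocations above: the mapping entry points
 * may legitimately be called from atomic context (for example from an
 * interrupt handler), so alloc_safe_buffer() must never sleep.
 */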
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);
        list_del(&buf->node);
        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);

        kfree(buf);
}
/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
                dma_addr_t dma_addr, const char *where)
{
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        if (dma_mapping_error(dev, dma_addr)) {
                /* dev is known to be non-NULL here */
                dev_err(dev, "Trying to %s invalid mapping\n", where);
                return NULL;
        }
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
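/*
 * Worked example for the mask arithmetic in map_single() below
 * (illustrative values, not taken from any particular platform):
 * for a 16MB DMA window, *dev->dma_mask == 0x00ffffff, so
 * limit = (mask + 1) & ~mask = 0x01000000, i.e. requests larger than
 * 16MB are rejected outright.  With a full 32-bit mask, mask + 1
 * wraps to 0 and limit stays 0, meaning "no size limit".  A buffer
 * needs bouncing when any address in [dma_addr, dma_addr + size - 1]
 * has bits set outside the mask.
 */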
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS(device_info->map_op_count++);

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#zx "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == NULL) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                                __func__, ptr);
                        /* error code consistent with the size check above */
                        return ~0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * The buffer needs no bouncing; do the normal cache
                 * maintenance for a streaming mapping.  (Bounce buffers
                 * taken above need no sync, since they come from the
                 * coherent allocators.)
                 */
                __dma_single_cpu_to_dev(ptr, size, dir);
        }

        return dma_addr;
}
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

        if (buf) {
                BUG_ON(buf->size != size);
                BUG_ON(buf->direction != dir);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                        buf->safe, buf->safe_dma_addr);

                DO_STATS(dev->archdata.dmabounce->bounce_count++);

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %zu\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * Since we may have written to a page cache page,
                         * we need to ensure that the data will be coherent
                         * with user mappings.
                         */
                        __cpuc_flush_dcache_area(ptr, size);
                }
                free_safe_buffer(dev->archdata.dmabounce, buf);
        } else {
                __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
        }
}
/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                __func__, page, offset, size, dir);

        BUG_ON(!valid_dma_direction(dir));

        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
                             "is not supported\n");
                return ~0;
        }

        return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);
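/*
 * The two sync helpers below return 0 when they handled a bounced
 * buffer and 1 when the address was not one of ours; a non-zero
 * return tells the arch DMA code to fall through to the normal cache
 * maintenance for non-bounced mappings.
 */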
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
                        __func__, buf->safe + off, buf->ptr + off, sz);
                memcpy(buf->ptr + off, buf->safe + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
                unsigned long off, size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                __func__, addr, off, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
                        __func__, buf->ptr + off, buf->safe + off, sz);
                memcpy(buf->safe + off, buf->ptr + off, sz);
        }
        return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}
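/*
 * Registration sketch (hypothetical driver code, not part of this
 * file; the 512/4096 pool sizes are illustrative only):
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dmabounce_register_dev(dev, 512, 4096);
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 * Passing 0 as large_buffer_size skips creation of the large pool;
 * requests bigger than the small pool then fall back to
 * dma_alloc_coherent() (this relies on the zeroed allocation below).
 */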
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        /*
         * Zeroed so the large pool fields are valid even when no
         * large pool is requested.
         */
        device_info = kzalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                dev_err(dev,
                        "Could not allocate dmabounce_device_info\n");
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;

        dev_info(dev, "dmabounce: registered device\n");

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
void dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;

        if (!device_info) {
                dev_warn(dev,
                         "Never registered with dmabounce but attempting "
                         "to unregister!\n");
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                dev_err(dev,
                        "Removing from dmabounce with pending buffers!\n");
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");