/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>

#include "mm.h"

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t **consistent_pte;

#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M

unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;

void __init init_consistent_dma_size(unsigned long size)
{
	unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);

	BUG_ON(consistent_pte); /* Check we're called before DMA region init */
	BUG_ON(base < VMALLOC_END);

	/* Grow region to accommodate specified size */
	if (base < consistent_base)
		consistent_base = base;
}

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	unsigned long base = consistent_base;
	unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;

	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
	if (!consistent_pte) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
	consistent_head.vm_start = base;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			       gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
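/*
 * Example (illustrative sketch only, not part of this file): a driver
 * allocating and freeing a coherent descriptor ring.  The names "pdev",
 * "foo_regs" and "FOO_RING_BASE" and the 4 KB size are hypothetical;
 * dma_alloc_coherent() and dma_free_coherent() are the entry points
 * implemented in this file.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, foo_regs + FOO_RING_BASE);	// hypothetical register
 *	...
 *	dma_free_coherent(&pdev->dev, SZ_4K, ring, ring_dma);
 */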
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
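/*
 * Example (illustrative sketch only): exporting a writecombined DMA buffer
 * to userspace from a driver's mmap file operation.  The names "foo_dev",
 * "foo_buf", "foo_buf_dma" and "foo_buf_size" are hypothetical;
 * dma_alloc_writecombine() and dma_mmap_writecombine() are the helpers
 * defined above.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		// foo_buf was obtained earlier with:
 *		// foo_buf = dma_alloc_writecombine(foo_dev, foo_buf_size,
 *		//				     &foo_buf_dma, GFP_KERNEL);
 *		return dma_mmap_writecombine(foo_dev, vma, foo_buf,
 *					     foo_buf_dma, foo_buf_size);
 *	}
 */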
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
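/*
 * Example (illustrative sketch only): the ___dma_single_* helpers above are
 * implementation details.  A driver that hands a streaming mapping back and
 * forth between CPU and device is expected to use the dma_sync_single_* API
 * from dma-mapping.h, which also copes with DMABOUNCE platforms.  "dev",
 * "buf" and "len" below are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...					// device DMAs into the buffer
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	...					// CPU examines the data
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	...					// device may DMA again
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */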
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	debug_dma_map_sg(dev, sg, nents, nents, dir);
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
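/*
 * Example (illustrative sketch only): mapping a scatterlist for a transmit
 * DMA and programming each segment into a hypothetical descriptor ring.
 * "foo_push_desc", "sgl" and "nsegs" are made-up names; dma_map_sg(),
 * dma_unmap_sg() and the sg_dma_{address,length} accessors are the real API.
 * Note dma_unmap_sg() takes the original nents, not the value returned by
 * dma_map_sg().
 *
 *	struct scatterlist *s;
 *	int i, nsegs;
 *
 *	nsegs = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (nsegs == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, nsegs, i)
 *		foo_push_desc(sg_dma_address(s), sg_dma_len(s));
 *	...					// wait for the device to finish
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */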
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	debug_dma_unmap_sg(dev, sg, nents, dir);

	for_each_sg(sg, s, nents, i)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					       sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
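/*
 * Example (illustrative sketch only): reusing a long-lived receive
 * scatterlist mapping across several DMA operations.  Ownership is handed
 * back to the CPU before the data is read and returned to the device before
 * the next transfer; "foo_process_rx" and "sgl" are made-up names.
 *
 *	// mapping set up once with dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE)
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	foo_process_rx(sgl);			// CPU reads the received data
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 *	// device may now DMA into the buffers again
 */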
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

#ifndef CONFIG_DMABOUNCE
	*dev->dma_mask = dma_mask;
#endif

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
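/*
 * Example (illustrative sketch only): a probe routine declaring that its
 * device can only address the low 32 bits, for both streaming and coherent
 * allocations.  DMA_BIT_MASK() and dma_set_coherent_mask() come from
 * linux/dma-mapping.h; "pdev" and the error handling style are the caller's
 * own.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
 *	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 *		dev_err(&pdev->dev, "no suitable DMA available\n");
 *		return -EIO;
 *	}
 */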
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);