dma-noncoherent.c

/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
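/*
 * For illustration (the size is platform configuration, not fixed): with
 * the default CONFIG_CONSISTENT_SIZE of 0x00200000 (2MB), the window is
 * [IOREMAP_TOP, IOREMAP_TOP + 2MB), and CONSISTENT_OFFSET() turns an
 * address in that window into a zero-based page index, e.g.
 * CONSISTENT_OFFSET(CONSISTENT_BASE + 3 * PAGE_SIZE) == 3.
 */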
/*
 * Spinlock protecting the (2MB by default) consistent memory window
 * above and the list of allocations carved out of it.
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
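/*
 * Illustrative example (hypothetical addresses): with existing regions
 * [BASE, BASE+0x3000) and [BASE+0x5000, BASE+0x6000) on the list, a
 * 0x2000-byte request starts at BASE, cannot fit before the first
 * region, advances to its end, and is inserted into the gap as
 * [BASE+0x3000, BASE+0x5000) -- a simple first-fit search over an
 * address-sorted list.
 */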
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}
	size = PAGE_ALIGN(size);

	/* For an ordinary 2^n - 1 mask this yields mask + 1, i.e. the
	 * size of the addressable range; 0 means no limit. */
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}
	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
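/*
 * Hypothetical usage sketch (not part of this file): drivers normally
 * reach this allocator through the generic DMA API rather than calling
 * it directly, e.g.:
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *	if (vaddr) {
 *		... program the device with 'handle', use 'vaddr' ...
 *		dma_free_coherent(dev, 4096, vaddr, handle);
 *	}
 *
 * On CONFIG_NOT_COHERENT_CACHE platforms those calls end up in
 * __dma_alloc_coherent() above and __dma_free_coherent() below.
 */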
/*
 * Free a consistent memory area as allocated by __dma_alloc_coherent()
 * above, unmapping it and releasing the underlying pages.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);
				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * Make an area consistent for a streaming DMA transfer in the
 * given direction.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned; otherwise there
		 * is the potential for discarding uncommitted data from
		 * the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
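/*
 * Illustrative pairing (a sketch, not from this file): streaming DMA on
 * a non-coherent platform writes dirty cache lines back before a device
 * reads a buffer, and invalidates stale lines before the CPU reads data
 * a device has written:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	writeback before device reads
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	invalidate before CPU reads
 */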
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
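/*
 * Worked example (hypothetical numbers): with PAGE_SIZE = 0x1000,
 * offset = 0x300 and size = 0x2200, the first segment covers 0xd00
 * bytes (to the end of the first page), nr_segs = 1 + (0x1500 +
 * 0xfff)/0x1000 = 3, and the loop syncs segments of 0xd00, 0x1000 and
 * 0x500 bytes, with seg_offset non-zero only for the first page.
 */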
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent: identical to __dma_sync(),
 * but takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);