consistent.c

/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on ram size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif
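
/*
 * Worked example (the numbers are illustrative, not taken from any real
 * platform): with dcache_base = 0x80000000 and dcache_high = 0x87ffffff,
 * UNCACHED_SHADOW_MASK is 0x08000000, so a buffer at 0x80123000 is handed
 * out as 0x80123000 | 0x08000000 = 0x88123000, i.e. the same DDR cells
 * reached through the uncached alias above the cacheable window.
 */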
/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			   virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
	    (unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)vaddr);
#endif

	/*
	 * free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
EXPORT_SYMBOL(consistent_alloc);
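
/*
 * Illustrative usage sketch (names are made up, not part of this file):
 * a driver needing a small uncached descriptor ring might do roughly
 *
 *	dma_addr_t ring_dma;
 *	void *ring = consistent_alloc(GFP_KERNEL, RING_BYTES, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, regs + RING_BASE_REG);
 *
 * keeping both the CPU pointer (ring) and the bus address (ring_dma),
 * since the device is programmed with the latter.
 */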
/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		ClearPageReserved(page);
		__free_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
					(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				ClearPageReserved(page);
				__free_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
EXPORT_SYMBOL(consistent_free);
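
/*
 * Illustrative only: the caller is expected to hand back the same size it
 * allocated (both sides PAGE_ALIGN it), e.g.
 *
 *	void *buf = consistent_alloc(GFP_KERNEL, 3 * PAGE_SIZE, &dma);
 *	...
 *	consistent_free(3 * PAGE_SIZE, buf);
 *
 * because the free path above walks exactly size/PAGE_SIZE pages.
 */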
/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
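
/*
 * Illustrative only: a caller filling a buffer for a device writes back
 * dirty lines before the transfer, and invalidates stale lines after the
 * device has written into it, e.g.
 *
 *	memcpy(buf, data, len);
 *	consistent_sync(buf, len, PCI_DMA_TODEVICE);
 *	...
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
 *
 * buf, data and len are made-up names.
 */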
/*
 * consistent_sync_page makes memory consistent; identical to
 * consistent_sync, but takes a struct page instead of a virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;
	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
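
/*
 * Illustrative only: for a page-based receive buffer a driver could
 * invalidate the whole page before the device DMAs into it, e.g.
 *
 *	consistent_sync_page(rx_page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
 *
 * rx_page being a made-up struct page pointer.
 */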