uncached.c

/*
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */
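
/*
 * Note: addresses handed out by this allocator live in the uncached
 * kernel window (offset by __IA64_UNCACHED_OFFSET), not in the cached
 * PAGE_OFFSET window; see uncached_get_new_chunk() below for how a
 * cached granule is converted.
 */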

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>

#define DEBUG	0

#if DEBUG
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

void __init efi_memmap_walk_uc(efi_freemem_callback_t callback);
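
/*
 * At most MAX_UNCACHED_GRANULES granules of cached memory are converted
 * to uncached over the lifetime of the system; each node keeps its own
 * pool of uncached pages in uncached_pool[].
 */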
#define MAX_UNCACHED_GRANULES	5
static int allocated_granules;

struct gen_pool *uncached_pool[MAX_NUMNODES];
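
/*
 * IPI handlers: uncached_get_new_chunk() uses smp_call_function() to run
 * these on all other CPUs, so that the PAL prefetch-visibility change and
 * the memory-controller drain take effect machine-wide, not just on the
 * CPU doing the conversion.
 */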
static void uncached_ipi_visibility(void *data)
{
	int status;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
		       "CPU %i\n", status, get_cpu());
}

static void uncached_ipi_mc_drain(void *data)
{
	int status;

	status = ia64_pal_mc_drain();
	if (status)
		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
		       "CPU %i\n", status, get_cpu());
}
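
/*
 * Callback used by the generic allocator when a node's pool runs dry:
 * allocate one granule worth of cached pages on that node, mark them
 * uncached, make the change visible to all CPUs (TLB flush, PAL prefetch
 * visibility, cache flush, memory-controller drain), and return the
 * granule's uncached virtual address.
 */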
static unsigned long
uncached_get_new_chunk(struct gen_pool *poolp)
{
	struct page *page;
	void *tmp;
	int status, i;
	unsigned long addr, node;

	if (allocated_granules >= MAX_UNCACHED_GRANULES)
		return 0;

	node = poolp->private;
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
				IA64_GRANULE_SHIFT - PAGE_SHIFT);

	dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
		page, (unsigned long)(page - vmem_map) << PAGE_SHIFT);

	/*
	 * Do magic if no mem on local node! XXX
	 */
	if (!page)
		return 0;

	tmp = page_address(page);

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);

	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
		status, get_cpu());

	if (!status) {
		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
		if (status)
			printk(KERN_WARNING "smp_call_function failed for "
			       "uncached_ipi_visibility! (%i)\n", status);
	}

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
	else
		flush_icache_range((unsigned long)tmp,
				   (unsigned long)tmp + IA64_GRANULE_SIZE);

	ia64_pal_mc_drain();
	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
	if (status)
		printk(KERN_WARNING "smp_call_function failed for "
		       "uncached_ipi_mc_drain! (%i)\n", status);

	addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	allocated_granules++;
	return addr;
}

/*
 * uncached_alloc_page
 *
 * Allocate one uncached page. Allocates on the requested node; if no
 * uncached pages are available on the requested node, the remaining
 * online nodes are searched, starting from the highest-numbered one.
 */
unsigned long
uncached_alloc_page(int nid)
{
	unsigned long maddr;

	maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);

	dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
		maddr, nid);

	/*
	 * If no memory is available on our local node, try the
	 * remaining nodes in the system.
	 */
	if (!maddr) {
		int i;

		for (i = MAX_NUMNODES - 1; i >= 0; i--) {
			if (i == nid || !node_online(i))
				continue;
			maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
			dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
				"returns %lx on node %i\n", maddr, i);
			if (maddr)
				break;
		}
	}

	return maddr;
}
EXPORT_SYMBOL(uncached_alloc_page);

/*
 * uncached_free_page
 *
 * Free a single uncached page.
 */
void
uncached_free_page(unsigned long maddr)
{
	int node;

	node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);

	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);

	if ((maddr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", maddr);

	gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
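
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants an uncached scratch page near the current CPU might do
 *
 *	unsigned long uc_addr = uncached_alloc_page(numa_node_id());
 *
 *	if (!uc_addr)
 *		return -ENOMEM;
 *	...
 *	uncached_free_page(uc_addr);
 */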

/*
 * uncached_build_memmap
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init
uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
{
	long length;
	unsigned long vstart, vend;
	int node;

	length = end - start;
	vstart = start + __IA64_UNCACHED_OFFSET;
	vend = end + __IA64_UNCACHED_OFFSET;

	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);

	memset((char *)vstart, 0, length);

	node = paddr_to_nid(start);

	for (; vstart < vend; vstart += PAGE_SIZE) {
		dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
		gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
	}

	return 0;
}
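
/*
 * Boot-time setup: create one gen_pool per online node, then walk the
 * EFI memmap so that any existing uncached (spill) memory is handed to
 * uncached_build_memmap() and seeded into the pools.
 */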
static int __init
uncached_init(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!node_online(i))
			continue;
		uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
						   &uncached_get_new_chunk, i);
	}

	efi_memmap_walk_uc(uncached_build_memmap);

	return 0;
}
__initcall(uncached_init);