/* uncached.c */
  1. /*
  2. * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of version 2 of the GNU General Public License
  6. * as published by the Free Software Foundation.
  7. *
  8. * A simple uncached page allocator using the generic allocator. This
  9. * allocator first utilizes the spare (spill) pages found in the EFI
  10. * memmap and will then start converting cached pages to uncached ones
  11. * at a granule at a time. Node awareness is implemented by having a
  12. * pool of pages per node.
  13. */
  14. #include <linux/types.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/init.h>
  18. #include <linux/errno.h>
  19. #include <linux/string.h>
  20. #include <linux/slab.h>
  21. #include <linux/efi.h>
  22. #include <linux/genalloc.h>
  23. #include <asm/page.h>
  24. #include <asm/pal.h>
  25. #include <asm/system.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/atomic.h>
  28. #include <asm/tlbflush.h>
  29. #include <asm/sn/arch.h>
/* Walk the spare (spill) uncached regions recorded in the EFI memmap;
 * provided by the arch EFI code, invoked once at init time. */
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

/* Upper bound on how many cached granules may be converted to uncached. */
#define MAX_UNCACHED_GRANULES 5

/* Count of granules converted so far; checked against the cap above. */
static int allocated_granules;

/* One uncached-page pool per node; entries stay NULL for offline nodes. */
struct gen_pool *uncached_pool[MAX_NUMNODES];
  34. static void uncached_ipi_visibility(void *data)
  35. {
  36. int status;
  37. status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
  38. if ((status != PAL_VISIBILITY_OK) &&
  39. (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
  40. printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
  41. "CPU %i\n", status, raw_smp_processor_id());
  42. }
  43. static void uncached_ipi_mc_drain(void *data)
  44. {
  45. int status;
  46. status = ia64_pal_mc_drain();
  47. if (status)
  48. printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
  49. "CPU %i\n", status, raw_smp_processor_id());
  50. }
  51. /*
  52. * Add a new chunk of uncached memory pages to the specified pool.
  53. *
  54. * @pool: pool to add new chunk of uncached memory to
  55. * @nid: node id of node to allocate memory from, or -1
  56. *
  57. * This is accomplished by first allocating a granule of cached memory pages
  58. * and then converting them to uncached memory pages.
  59. */
  60. static int uncached_add_chunk(struct gen_pool *pool, int nid)
  61. {
  62. struct page *page;
  63. int status, i;
  64. unsigned long c_addr, uc_addr;
  65. if (allocated_granules >= MAX_UNCACHED_GRANULES)
  66. return -1;
  67. /* attempt to allocate a granule's worth of cached memory pages */
  68. page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
  69. IA64_GRANULE_SHIFT-PAGE_SHIFT);
  70. if (!page)
  71. return -1;
  72. /* convert the memory pages from cached to uncached */
  73. c_addr = (unsigned long)page_address(page);
  74. uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
  75. /*
  76. * There's a small race here where it's possible for someone to
  77. * access the page through /dev/mem halfway through the conversion
  78. * to uncached - not sure it's really worth bothering about
  79. */
  80. for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
  81. SetPageUncached(&page[i]);
  82. flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
  83. status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
  84. if (!status) {
  85. status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
  86. if (status)
  87. goto failed;
  88. }
  89. preempt_disable();
  90. if (ia64_platform_is("sn2"))
  91. sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
  92. else
  93. flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
  94. /* flush the just introduced uncached translation from the TLB */
  95. local_flush_tlb_all();
  96. preempt_enable();
  97. ia64_pal_mc_drain();
  98. status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
  99. if (status)
  100. goto failed;
  101. /*
  102. * The chunk of memory pages has been converted to uncached so now we
  103. * can add it to the pool.
  104. */
  105. status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
  106. if (status)
  107. goto failed;
  108. allocated_granules++;
  109. return 0;
  110. /* failed to convert or add the chunk so give it back to the kernel */
  111. failed:
  112. for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
  113. ClearPageUncached(&page[i]);
  114. free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
  115. return -1;
  116. }
  117. /*
  118. * uncached_alloc_page
  119. *
  120. * @starting_nid: node id of node to start with, or -1
  121. *
  122. * Allocate 1 uncached page. Allocates on the requested node. If no
  123. * uncached pages are available on the requested node, roundrobin starting
  124. * with the next higher node.
  125. */
  126. unsigned long uncached_alloc_page(int starting_nid)
  127. {
  128. unsigned long uc_addr;
  129. struct gen_pool *pool;
  130. int nid;
  131. if (unlikely(starting_nid >= MAX_NUMNODES))
  132. return 0;
  133. if (starting_nid < 0)
  134. starting_nid = numa_node_id();
  135. nid = starting_nid;
  136. do {
  137. if (!node_online(nid))
  138. continue;
  139. pool = uncached_pool[nid];
  140. if (pool == NULL)
  141. continue;
  142. do {
  143. uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
  144. if (uc_addr != 0)
  145. return uc_addr;
  146. } while (uncached_add_chunk(pool, nid) == 0);
  147. } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
  148. return 0;
  149. }
  150. EXPORT_SYMBOL(uncached_alloc_page);
  151. /*
  152. * uncached_free_page
  153. *
  154. * @uc_addr: uncached address of page to free
  155. *
  156. * Free a single uncached page.
  157. */
  158. void uncached_free_page(unsigned long uc_addr)
  159. {
  160. int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
  161. struct gen_pool *pool = uncached_pool[nid];
  162. if (unlikely(pool == NULL))
  163. return;
  164. if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
  165. panic("uncached_free_page invalid address %lx\n", uc_addr);
  166. gen_pool_free(pool, uc_addr, PAGE_SIZE);
  167. }
  168. EXPORT_SYMBOL(uncached_free_page);
  169. /*
  170. * uncached_build_memmap,
  171. *
  172. * @uc_start: uncached starting address of a chunk of uncached memory
  173. * @uc_end: uncached ending address of a chunk of uncached memory
  174. * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
  175. *
  176. * Called at boot time to build a map of pages that can be used for
  177. * memory special operations.
  178. */
  179. static int __init uncached_build_memmap(unsigned long uc_start,
  180. unsigned long uc_end, void *arg)
  181. {
  182. int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
  183. struct gen_pool *pool = uncached_pool[nid];
  184. size_t size = uc_end - uc_start;
  185. touch_softlockup_watchdog();
  186. if (pool != NULL) {
  187. memset((char *)uc_start, 0, size);
  188. (void) gen_pool_add(pool, uc_start, size, nid);
  189. }
  190. return 0;
  191. }
  192. static int __init uncached_init(void)
  193. {
  194. int nid;
  195. for_each_online_node(nid) {
  196. uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
  197. }
  198. efi_memmap_walk_uc(uncached_build_memmap, NULL);
  199. return 0;
  200. }
  201. __initcall(uncached_init);