homecache.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"


#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.  There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif
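
/*
 * Example (boot-time usage): passing "noallocl2" on the kernel command
 * line invokes set_noallocl2() via early_param(), suppressing local L2
 * allocation for remotely-homed pages for the life of the boot.
 */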

/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)


/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
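
/*
 * Note on the ASID linearization above: tiles are addressed by (x, y)
 * grid coordinates, and the code maps them to cpu numbers row-major.
 * For example, on a hypothetical 8x8 mesh (smp_width == 8), the tile
 * at x = 2, y = 3 is cpu 3 * 8 + 2 == 26.
 */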

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0; /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;

	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}

void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
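
/*
 * Example (hypothetical caller): push everything held in the current
 * cpu's L2 cache back to memory:
 *
 *	homecache_evict(cpumask_of(smp_processor_id()));
 */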

/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
			   struct cpumask *home_mask)
{
	int i;

	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED)
			continue;
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
}

/*
 * Return the passed length, or HV_FLUSH_EVICT_L2 if it's long enough
 * that we believe we should evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}
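
/*
 * Example (hypothetical sizes): if CHIP_L2_CACHE_SIZE() were 256 KB,
 * a request to flush 1 MB would collapse to a whole-L2 evict, while a
 * 4 KB request would remain a ranged flush of 4096 bytes.
 */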

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
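
/*
 * Example (hypothetical caller): flush a single page out of all caches
 * before handing it to a non-coherent device:
 *
 *	homecache_flush_cache(page, 0);
 */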

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
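
/*
 * Example (hypothetical caller): rewrite a kernel PTE so the page it
 * maps is homed on cpu 1's L2 cache:
 *
 *	pte_t pte = *virt_to_pte(NULL, kva);
 *	pte = pte_set_home(pte, 1);
 */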

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}

struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		int pages = (1 << order);
		homecache_change_page_home(page, order, initial_page_home());
		while (pages--)
			__free_page(page++);
	}
}
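
/*
 * Example (hypothetical caller): allocate a lowmem page homed on cpu 2,
 * use it, then return it with its default home restored:
 *
 *	struct page *p = homecache_alloc_pages(GFP_KERNEL, 0, 2);
 *	if (p) {
 *		unsigned long kva = (unsigned long)page_address(p);
 *		...
 *		homecache_free_pages(kva, 0);
 *	}
 */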