/*
 * linux/arch/m68k/mm/memory.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 */
  6. #include <linux/config.h>
  7. #include <linux/mm.h>
  8. #include <linux/kernel.h>
  9. #include <linux/string.h>
  10. #include <linux/types.h>
  11. #include <linux/slab.h>
  12. #include <linux/init.h>
  13. #include <linux/pagemap.h>
  14. #include <asm/setup.h>
  15. #include <asm/segment.h>
  16. #include <asm/page.h>
  17. #include <asm/pgalloc.h>
  18. #include <asm/system.h>
  19. #include <asm/traps.h>
  20. #include <asm/machdep.h>
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

/* A pointer-table page is tracked through the otherwise unused 'lru'
 * list_head embedded in its struct page; a ptable_desc is that node. */
typedef struct list_head ptable_desc;

/* Pages that currently hold pointer tables; pages with free slots are
 * kept at the front (see get/free_pointer_table below). */
static LIST_HEAD(ptable_list);

/* Kernel virtual address of a page -> its embedded list node ... */
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
/* ... and back from the list node to the owning struct page. */
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
/* Per-page slot bitmap, stored in the low byte of the unused
 * page->index field: bit set = that pointer-table slot is free. */
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

/* Size in bytes of one pmd-level pointer table (8 fit in a page). */
#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
/*
 * Register a boot-time pointer table at kernel address 'ptable' with the
 * allocator above: on first sight of its page, mark all slots free and
 * queue the page on ptable_list; then mark this particular slot in use.
 */
void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		/* First table seen from this page (NOTE(review): relies on
		 * page->index starting out with this bit clear — confirm). */
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	/* This table is live: clear its free bit. */
	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	set_page_count(PD_PAGE(dp), 1);

	return;
}
/*
 * Allocate one pmd-level pointer table.  Tables are carved out of whole
 * pages (8 per page); when the page at the head of ptable_list has no
 * free slot, a fresh zeroed, non-cacheable page is allocated.  Returns
 * NULL only when the page allocation fails.
 */
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		/* Head page is full (or the list is empty): start a new page. */
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		/* Hand out slot 0 right away: 0xfe = slots 1..7 still free. */
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* Find the lowest set (free) bit and its byte offset into the page. */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list: keep pages with free slots in front */
		list_del(dp);
		list_add_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
/*
 * Release a pointer table obtained from get_pointer_table().  Returns 1
 * if the whole backing page became empty and was freed, 0 otherwise.
 * Panics on double free (slot already marked free).
 */
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		/* page was made non-cacheable in get_pointer_table(); undo
		 * that before handing it back to the page allocator */
		cache_page((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_del(dp);
		list_add(dp, &ptable_list);
	}
	return 0;
}
#ifdef DEBUG_INVALID_PTOV
/* Remaining budget of "invalid phys_to_virt" diagnostics; decremented
 * by mm_ptov() below to limit console spam. */
int mm_inv_cnt = 5;
#endif
  111. #ifndef CONFIG_SINGLE_MEMORY_CHUNK
  112. /*
  113. * The following two routines map from a physical address to a kernel
  114. * virtual address and vice versa.
  115. */
  116. unsigned long mm_vtop(unsigned long vaddr)
  117. {
  118. int i=0;
  119. unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
  120. do {
  121. if (voff < m68k_memory[i].size) {
  122. #ifdef DEBUGPV
  123. printk ("VTOP(%p)=%lx\n", vaddr,
  124. m68k_memory[i].addr + voff);
  125. #endif
  126. return m68k_memory[i].addr + voff;
  127. }
  128. voff -= m68k_memory[i].size;
  129. } while (++i < m68k_num_memory);
  130. /* As a special case allow `__pa(high_memory)'. */
  131. if (voff == 0)
  132. return m68k_memory[i-1].addr + m68k_memory[i-1].size;
  133. return -1;
  134. }
  135. #endif
  136. #ifndef CONFIG_SINGLE_MEMORY_CHUNK
  137. unsigned long mm_ptov (unsigned long paddr)
  138. {
  139. int i = 0;
  140. unsigned long poff, voff = PAGE_OFFSET;
  141. do {
  142. poff = paddr - m68k_memory[i].addr;
  143. if (poff < m68k_memory[i].size) {
  144. #ifdef DEBUGPV
  145. printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
  146. #endif
  147. return poff + voff;
  148. }
  149. voff += m68k_memory[i].size;
  150. } while (++i < m68k_num_memory);
  151. #ifdef DEBUG_INVALID_PTOV
  152. if (mm_inv_cnt > 0) {
  153. mm_inv_cnt--;
  154. printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
  155. paddr, __builtin_return_address(0));
  156. }
  157. #endif
  158. return -1;
  159. }
  160. #endif
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
	/* CINVP %bc discards the lines of the page containing paddr from
	 * both caches WITHOUT writing dirty data back (see the ++roman
	 * warning below).  The low bits of the operand are don't-cares.
	 * NOTE(review): the leading nop presumably synchronizes the
	 * pipeline/write buffer before the cache op — confirm against the
	 * 68040 manual. */
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* invalidate page in i-cache */
static inline void cleari040(unsigned long paddr)
{
	/* Same as clear040() but only invalidates the instruction cache
	 * (CINVP %ic); no data-cache lines are touched. */
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	/* CPUSHP %bc writes dirty lines of the page back to memory; on the
	 * '040 it also invalidates them, but on the '060 with DPI set the
	 * data-cache lines stay valid (see comment below pushcl040). */
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);		/* write back (and on '040: invalidate) */
	if (CPU_IS_060)
		/* '060 CPUSH leaves lines valid (DPI set in CACR), so an
		 * explicit invalidate is still needed */
		clear040(paddr);
	local_irq_restore(flags);
}
/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */
/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned.  Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			/* partial leading page: push-and-invalidate, since a
			 * plain CINVP would discard dirty data outside the
			 * requested range */
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		/* pages completely inside the range: invalidate-only */
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		/* no writeback cache: just flush both on-chip caches via CACR */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(0);
#endif
}
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(1);
#endif
}
  309. #ifndef CONFIG_SINGLE_MEMORY_CHUNK
  310. int mm_end_of_chunk (unsigned long addr, int len)
  311. {
  312. int i;
  313. for (i = 0; i < m68k_num_memory; i++)
  314. if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
  315. return 1;
  316. return 0;
  317. }
  318. #endif