pgtable.c
/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>

#define flush_HPTE(X, va, pg)	_tlbie(va)
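
/*
 * flush_HPTE() keeps the name it had in the ppc code this file was
 * derived from.  MicroBlaze has no hashed page table, so flushing the
 * "HPTE" reduces to invalidating the TLB entry for the virtual address
 * with _tlbie(); the X and pg arguments are ignored.
 */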

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

/* The maximum lowmem defaults to 768Mb, but this can be configured to
 * another value.
 */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
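
/*
 * Set up an I/O mapping for the physical range [addr, addr + size).
 * Once mem_init() has run, virtual space comes from the vmalloc
 * allocator; before that, mappings are handed out top-down from
 * ioremap_base, with ioremap_bot tracking the lowest address given
 * out so far.  Attempts to remap RAM that the kernel is using are
 * rejected.
 */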
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
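
/*
 * ioremap() maps a physical device range uncached (_PAGE_NO_CACHE,
 * which __ioremap() above also turns into a guarded mapping);
 * iounmap() frees the mapping again.  Early boot-time mappings, which
 * sit at or above ioremap_bot, are deliberately left alone here since
 * they were never allocated from the vmalloc area.
 */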
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
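
/*
 * Typical driver usage (a sketch; UART_BASE and the 0x1000 length are
 * made-up values for illustration):
 *
 *	void __iomem *regs = ioremap(UART_BASE, 0x1000);
 *	if (regs) {
 *		u32 status = readl(regs + 0x8);
 *		...
 *		iounmap(regs);
 *	}
 */

/*
 * Establish a single page-table mapping from virtual address va to
 * physical address pa with the given _PAGE_* protection flags,
 * allocating a second-level table on demand.  Returns 0 on success
 * or -ENOMEM if the PTE page could not be allocated.
 */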
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			flush_HPTE(0, va, pmd_val(*pd));
			/* flush_HPTE(0, va, pg); */
	}
	return err;
}
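
/*
 * Note on the protection scheme below: pages outside the kernel text
 * are mapped writable (_PAGE_WRENABLE), while text pages get
 * _PAGE_USER instead, which (per the in-line comment below) is what
 * keeps kernel text read-only on MicroBlaze.
 */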

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < memory_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
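
/*
 * Note: nothing in this file actually uses is_power_of_2(); current
 * kernels provide an equivalent helper in <linux/log2.h>.
 */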

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
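
/*
 * Allocate a zeroed page for a kernel PTE table.  After mem_init()
 * the page allocator is used (with __GFP_ZERO doing the clearing);
 * during early boot the page comes from early_get_page() and is
 * cleared by hand, since the allocator is not up yet.
 */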
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
				__GFP_REPEAT | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}