/*
 * include/asm-xtensa/fixmap.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
  10. #ifndef _XTENSA_FIXMAP_H
  11. #define _XTENSA_FIXMAP_H
  12. #include <asm/processor.h>
  13. #ifdef CONFIG_MMU
  14. /*
  15. * Here we define all the compile-time virtual addresses.
  16. */
  17. #if XCHAL_SEG_MAPPABLE_VADDR != 0
  18. # error "Current port requires virtual user space starting at 0"
  19. #endif
  20. #if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
  21. # error "Current port requires at least 0x8000000 bytes for user space"
  22. #endif
  23. /* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
  24. #define __IN_VMALLOC(addr) \
  25. (((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
  26. #define __SPAN_VMALLOC(start,end) \
  27. (((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
  28. #define INSIDE_VMALLOC(start,end) \
  29. (__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
  30. #if XCHAL_NUM_INSTROM
  31. # if XCHAL_NUM_INSTROM == 1
  32. # if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
  33. # error vmalloc range conflicts with instrom0
  34. # endif
  35. # endif
  36. # if XCHAL_NUM_INSTROM == 2
  37. # if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
  38. # error vmalloc range conflicts with instrom1
  39. # endif
  40. # endif
  41. #endif
  42. #if XCHAL_NUM_INSTRAM
  43. # if XCHAL_NUM_INSTRAM == 1
  44. # if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
  45. # error vmalloc range conflicts with instram0
  46. # endif
  47. # endif
  48. # if XCHAL_NUM_INSTRAM == 2
  49. # if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
  50. # error vmalloc range conflicts with instram1
  51. # endif
  52. # endif
  53. #endif
  54. #if XCHAL_NUM_DATAROM
  55. # if XCHAL_NUM_DATAROM == 1
  56. # if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
  57. # error vmalloc range conflicts with datarom0
  58. # endif
  59. # endif
  60. # if XCHAL_NUM_DATAROM == 2
  61. # if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
  62. # error vmalloc range conflicts with datarom1
  63. # endif
  64. # endif
  65. #endif
  66. #if XCHAL_NUM_DATARAM
  67. # if XCHAL_NUM_DATARAM == 1
  68. # if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
  69. # error vmalloc range conflicts with dataram0
  70. # endif
  71. # endif
  72. # if XCHAL_NUM_DATARAM == 2
  73. # if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
  74. # error vmalloc range conflicts with dataram1
  75. # endif
  76. # endif
  77. #endif
  78. #if XCHAL_NUM_XLMI
  79. # if XCHAL_NUM_XLMI == 1
  80. # if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
  81. # error vmalloc range conflicts with xlmi0
  82. # endif
  83. # endif
  84. # if XCHAL_NUM_XLMI == 2
  85. # if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
  86. # error vmalloc range conflicts with xlmi1
  87. # endif
  88. # endif
  89. #endif
  90. #if (XCHAL_NUM_INSTROM > 2) || \
  91. (XCHAL_NUM_INSTRAM > 2) || \
  92. (XCHAL_NUM_DATARAM > 2) || \
  93. (XCHAL_NUM_DATAROM > 2) || \
  94. (XCHAL_NUM_XLMI > 2)
  95. # error Insufficient checks on vmalloc above for more than 2 devices
  96. #endif
  97. /*
  98. * USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
  99. * TASK_SIZE down to 0x4000000 to simplify the handling of windowed
  100. * call instructions (currently limited to a range of 1 GByte). User
  101. * tasks may very well reclaim the VM space from 0x40000000 to
  102. * 0x7fffffff in the future, so we do not want the kernel becoming
  103. * accustomed to having any of its stuff (e.g., page tables) in this
  104. * region. This VM region is no-man's land for now.
  105. */
  106. #define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
  107. #define USER_VM_SIZE 0x80000000
  108. /* Size of page table: */
  109. #define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
  110. #define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
  111. /* All kernel-mappable space: */
  112. #define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
  113. #define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
  114. /* Carve out page table at start of kernel-mappable area: */
  115. #if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
  116. #error "Gimme some space for page table!"
  117. #endif
  118. #define PGTABLE_START KERNEL_ALLMAP_START
  119. /* Remaining kernel-mappable space: */
  120. #define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
  121. #define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
  122. #if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
  123. # error "Shouldn't the kernel have at least *some* mappable space?"
  124. #endif
  125. #define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
  126. #endif
  127. /*
  128. * Some constants used elsewhere, but perhaps only in Xtensa header
  129. * files, so maybe we can get rid of some and access compile-time HAL
  130. * directly...
  131. *
  132. * Note: We assume that system RAM is located at the very start of the
  133. * kernel segments !!
  134. */
  135. #define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
  136. #define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
  137. #define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
  138. /*
  139. * Returns the physical/virtual addresses of the kernel space
  140. * (works with the cached kernel segment only, which is the
  141. * one normally used for kernel operation).
  142. */
  143. /* PHYSICAL BYPASS CACHED
  144. *
  145. * bypass vaddr bypass paddr * cached vaddr
  146. * cached vaddr cached paddr bypass vaddr *
  147. * bypass paddr * bypass vaddr cached vaddr
  148. * cached paddr * bypass vaddr cached vaddr
  149. * other * * *
  150. */
  151. #define PHYSADDR(a) \
  152. (((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
  153. && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
  154. (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
  155. ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
  156. && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
  157. (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
  158. (unsigned)(a))
  159. #define BYPASS_ADDR(a) \
  160. (((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
  161. && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
  162. (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
  163. ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
  164. && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
  165. (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
  166. ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
  167. && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
  168. (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
  169. (unsigned)(a))
  170. #define CACHED_ADDR(a) \
  171. (((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
  172. && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
  173. (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
  174. ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
  175. && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
  176. (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
  177. ((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
  178. && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
  179. (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
  180. (unsigned)(a))
  181. #define PHYSADDR_IO(a) \
  182. (((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
  183. && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
  184. (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
  185. ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
  186. && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
  187. (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
  188. (unsigned)(a))
  189. #define BYPASS_ADDR_IO(a) \
  190. (((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
  191. && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
  192. (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
  193. ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
  194. && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
  195. (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
  196. ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
  197. && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
  198. (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
  199. (unsigned)(a))
  200. #define CACHED_ADDR_IO(a) \
  201. (((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
  202. && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
  203. (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
  204. ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
  205. && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
  206. (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
  207. ((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
  208. && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
  209. (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
  210. (unsigned)(a))
  211. #endif /* _XTENSA_ADDRSPACE_H */