/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <mach/memory.h>
#include <asm/sizes.h>

/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#define UL(x) _AC(x, UL)
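
/*
 * For example, _AC() from <linux/const.h> pastes the suffix only when
 * compiling C:
 *
 *	UL(0x04000000)	-> 0x04000000UL	(C code)
 *	UL(0x04000000)	-> 0x04000000	(__ASSEMBLY__, assembler-friendly)
 */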

#ifdef CONFIG_MMU

/*
 * PAGE_OFFSET - the virtual address of the start of the kernel image
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		UL(0x04000000)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif
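
/*
 * Illustrative layout, assuming the common CONFIG_PAGE_OFFSET of
 * 0xC0000000 (a 3G/1G split; the actual value is a Kconfig choice):
 *
 *	PAGE_OFFSET		0xC0000000
 *	TASK_SIZE		0xBF000000	(PAGE_OFFSET - 16MB)
 *	TASK_UNMAPPED_BASE	0x40000000	(PAGE_OFFSET / 3)
 *	MODULES_VADDR		0xBF000000	(PAGE_OFFSET - 16MB)
 *
 * User space ends exactly where the module area begins, so the
 * TASK_SIZE > MODULES_VADDR check above does not trigger.
 */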

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)	(MODULES_VADDR + ((physaddr) & 0x000fffff))
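
/*
 * For example (the physical address below is purely illustrative), an
 * XIP kernel at 0x00204000 keeps its offset within the 1MB section:
 *
 *	XIP_VIRT_ADDR(0x00204000) == MODULES_VADDR + 0x00004000
 */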

/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24

#else /* CONFIG_MMU */

/*
 * Without an MMU the user task size is limited only by the end of the
 * free RAM region, so a single fixed value is hard to define and can
 * never quite match the original meaning of TASK_SIZE.
 * Fortunately, nothing references it in noMMU mode, for now.
 */
#ifndef TASK_SIZE
#define TASK_SIZE		(CONFIG_DRAM_SIZE)
#endif

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef PHYS_OFFSET
#define PHYS_OFFSET		(CONFIG_DRAM_BASE)
#endif

#ifndef END_MEM
#define END_MEM			(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
#endif

#ifndef PAGE_OFFSET
#define PAGE_OFFSET		(PHYS_OFFSET)
#endif

/*
 * The module can be at any place in RAM in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		(PHYS_OFFSET)

#endif /* !CONFIG_MMU */

/*
 * Size of DMA-consistent memory region.  Must be multiple of 2M,
 * between 2MB and 14MB inclusive.
 */
#ifndef CONSISTENT_DMA_SIZE
#define CONSISTENT_DMA_SIZE	SZ_2M
#endif

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */
#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
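
/*
 * Worked example (the values below are assumptions; PHYS_OFFSET comes
 * from <mach/memory.h>): with PAGE_OFFSET = 0xC0000000 and
 * PHYS_OFFSET = 0x80000000 the translation is a fixed linear offset:
 *
 *	__virt_to_phys(0xC0100000) == 0x80100000
 *	__phys_to_virt(0x80100000) == 0xC0100000
 */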

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define __phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
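
/*
 * For example, with the usual 4K pages (PAGE_SHIFT == 12):
 *
 *	__phys_to_pfn(0x80001000) == 0x80001
 *	__pfn_to_phys(0x80001)    == 0x80001000
 */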

#ifndef __ASSEMBLY__

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
#ifndef ISA_DMA_THRESHOLD
#define ISA_DMA_THRESHOLD	(0xffffffffULL)
#endif

#ifndef arch_adjust_zones
#define arch_adjust_zones(node,size,holes) do { } while (0)
#elif !defined(CONFIG_ZONE_DMA)
#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
#endif

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
static inline unsigned long virt_to_phys(void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

static inline void *phys_to_virt(unsigned long x)
{
	return (void *)(__phys_to_virt((unsigned long)(x)));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
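
/*
 * Minimal usage sketch (not part of this header; for core kernel code
 * only, never drivers - see the comments above): round-tripping a
 * pointer in the direct-mapped region.
 *
 *	void *vaddr = kbuf;			// assumed lowmem pointer
 *	unsigned long paddr = __pa(vaddr);	// linear translation
 *	BUG_ON(__va(paddr) != vaddr);		// and back again
 */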

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
#endif

static inline __deprecated unsigned long virt_to_bus(void *x)
{
	return __virt_to_bus((unsigned long)x);
}

static inline __deprecated void *bus_to_virt(unsigned long x)
{
	return (void *)__bus_to_virt(x);
}

/*
 * Conversion between a struct page and a physical address.
 *
 * Note: when converting an unknown physical address to a
 * struct page, the resulting pointer must be validated
 * using VALID_PAGE().  It must return an invalid struct page
 * for any physical address not corresponding to a system
 * RAM address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *  pfn_valid(pfn)	indicates whether a PFN number is valid
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#ifndef CONFIG_DISCONTIGMEM

#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#ifndef CONFIG_SPARSEMEM
#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)

#define PHYS_TO_NID(addr)	(0)

#else /* CONFIG_DISCONTIGMEM */

/*
 * This is more complex.  We have a set of mem_map arrays spread
 * around in memory.
 */
#include <linux/numa.h>

#define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)

#define pfn_valid(pfn)						\
	({							\
		unsigned int nid = PFN_TO_NID(pfn);		\
		int valid = nid < MAX_NUMNODES;			\
		if (valid) {					\
			pg_data_t *node = NODE_DATA(nid);	\
			valid = (pfn - node->node_start_pfn) <	\
				node->node_spanned_pages;	\
		}						\
		valid;						\
	})

#define virt_to_page(kaddr)					\
	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))

#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)

/*
 * Common discontigmem stuff.
 *  PHYS_TO_NID is used by the ARM kernel/setup.c
 */
#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)

/*
 * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
 * and returns the mem_map of that node.
 */
#define ADDR_TO_MAPBASE(kaddr)	NODE_MEM_MAP(KVADDR_TO_NID(kaddr))

/*
 * Given a page frame number, PFN_TO_MAPBASE finds the owning node of
 * the memory and returns the mem_map of that node.
 */
#define PFN_TO_MAPBASE(pfn)	NODE_MEM_MAP(PFN_TO_NID(pfn))

#ifdef NODE_MEM_SIZE_BITS

#define NODE_MEM_SIZE_MASK	((1 << NODE_MEM_SIZE_BITS) - 1)

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define KVADDR_TO_NID(addr) \
	(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MEM_SIZE_BITS)

/*
 * Given a page frame number, convert it to a node id.
 */
#define PFN_TO_NID(pfn) \
	(((pfn) - PHYS_PFN_OFFSET) >> (NODE_MEM_SIZE_BITS - PAGE_SHIFT))

/*
 * Given a kaddr, LOCAL_MAP_NR finds the owning node of the memory
 * and returns the index corresponding to the appropriate page in the
 * node's mem_map.
 */
#define LOCAL_MAP_NR(addr) \
	(((unsigned long)(addr) & NODE_MEM_SIZE_MASK) >> PAGE_SHIFT)
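
/*
 * Worked example, assuming NODE_MEM_SIZE_BITS == 26 (64MB nodes) and
 * 4K pages (both values are assumptions set by the machine class):
 *
 *	kaddr = PAGE_OFFSET + 0x05000000
 *	KVADDR_TO_NID(kaddr) == 0x05000000 >> 26                == 1
 *	LOCAL_MAP_NR(kaddr)  == (0x05000000 & 0x03ffffff) >> 12 == 0x1000
 *
 * i.e. the address belongs to node 1, at index 0x1000 of that node's
 * mem_map.
 */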

#endif /* NODE_MEM_SIZE_BITS */

#endif /* !CONFIG_DISCONTIGMEM */

/*
 * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
 */
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * Optional coherency support.  Currently used only by selected
 * Intel XSC3-based systems.
 */
#ifndef arch_is_coherent
#define arch_is_coherent()	0
#endif

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_ARM_MEMORY_H */