/*
 *  linux/include/asm-arm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#ifndef __ASSEMBLY__
#define UL(x) (x##UL)
#else
#define UL(x) (x)
#endif

#include <linux/compiler.h>
#include <asm/arch/memory.h>
#include <asm/sizes.h>

#ifndef TASK_SIZE
/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		UL(0xbf000000)
#define TASK_UNMAPPED_BASE	UL(0x40000000)
#endif

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		UL(0x04000000)

/*
 * Page offset: 3GB
 */
#ifndef PAGE_OFFSET
#define PAGE_OFFSET		UL(0xc0000000)
#endif

/*
 * Size of DMA-consistent memory region.  Must be multiple of 2M,
 * between 2MB and 14MB inclusive.
 */
#ifndef CONSISTENT_DMA_SIZE
#define CONSISTENT_DMA_SIZE	SZ_2M
#endif

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */
#ifndef __virt_to_phys
#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
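
/*
 * Illustrative example (not part of the original header): on a
 * hypothetical platform where PHYS_OFFSET is 0x10000000, with the
 * default PAGE_OFFSET of 0xc0000000, the private helpers work out as:
 *
 *	__virt_to_phys(0xc0012345) == 0x10012345
 *	__phys_to_virt(0x10012345) == 0xc0012345
 */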

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
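
/*
 * Illustrative example (assumes 4KB pages, i.e. PAGE_SHIFT == 12):
 *
 *	__phys_to_pfn(0x10200000) == 0x10200
 *	__pfn_to_phys(0x10200)    == 0x10200000
 */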

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#define MODULE_END	(PAGE_OFFSET)
#define MODULE_START	(MODULE_END - 16*1048576)

#if TASK_SIZE > MODULE_START
#error Top of user space clashes with start of module space
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
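
/*
 * Illustrative example (not part of the original header): with the
 * default PAGE_OFFSET, MODULE_START is 0xbf000000 (16MB below
 * 0xc0000000), so an XIP kernel stored at physical address 0x00218000
 * would map as
 *
 *	XIP_VIRT_ADDR(0x00218000) == 0xbf018000
 *
 * i.e. only the offset within the 1MB section (low 20 bits) is kept.
 */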

#ifndef __ASSEMBLY__

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
#ifndef ISA_DMA_THRESHOLD
#define ISA_DMA_THRESHOLD	(0xffffffffULL)
#endif

#ifndef arch_adjust_zones
#define arch_adjust_zones(node,size,holes) do { } while (0)
#endif

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
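
/*
 * Illustrative example (assumes a hypothetical PHYS_OFFSET of
 * 0x10000000 and 4KB pages): PHYS_PFN_OFFSET == 0x10000, so mem_map[0]
 * describes the RAM page at physical address 0x10000000.
 */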

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
static inline unsigned long virt_to_phys(void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

static inline void *phys_to_virt(unsigned long x)
{
	return (void *)(__phys_to_virt((unsigned long)(x)));
}
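
/*
 * Illustrative usage sketch (not part of the original header): core
 * kernel code may translate a direct-mapped (lowmem) address, e.g.
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(pa) != buf);
 *
 * Drivers must not use this for device DMA addresses - see dma-mapping.h.
 */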

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
static inline __deprecated unsigned long virt_to_bus(void *x)
{
	return __virt_to_bus((unsigned long)x);
}

static inline __deprecated void *bus_to_virt(unsigned long x)
{
	return (void *)__bus_to_virt(x);
}

/*
 * Conversion between a struct page and a physical address.
 *
 * Note: when converting an unknown physical address to a
 * struct page, the resulting pointer must be validated
 * using VALID_PAGE().  It must return an invalid struct page
 * for any physical address not corresponding to a system
 * RAM address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *  pfn_valid(pfn)	indicates whether a PFN number is valid
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#ifndef CONFIG_DISCONTIGMEM

#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)

#define PHYS_TO_NID(addr)	(0)
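
/*
 * Illustrative example (assumes PHYS_PFN_OFFSET == 0x10000 and 64MB of
 * RAM, i.e. max_mapnr == 0x4000): pfn_valid() accepts PFNs
 * 0x10000..0x13fff, covering physical addresses 0x10000000..0x13ffffff.
 */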

#else /* CONFIG_DISCONTIGMEM */

/*
 * This is more complex.  We have a set of mem_map arrays spread
 * around in memory.
 */
#include <linux/numa.h>

#define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)

#define pfn_valid(pfn)						\
	({							\
		unsigned int nid = PFN_TO_NID(pfn);		\
		int valid = nid < MAX_NUMNODES;			\
		if (valid) {					\
			pg_data_t *node = NODE_DATA(nid);	\
			valid = (pfn - node->node_start_pfn) <	\
				node->node_spanned_pages;	\
		}						\
		valid;						\
	})

#define virt_to_page(kaddr)					\
	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))

#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)

/*
 * Common discontigmem stuff.
 *  PHYS_TO_NID is used by the ARM kernel/setup.c
 */
#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)

#endif /* !CONFIG_DISCONTIGMEM */

/*
 * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
 */
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * Optional device DMA address remapping.  Do _not_ use directly!
 * We should really eliminate virt_to_bus() here - it's deprecated.
 */
#ifndef __arch_page_to_dma
#define page_to_dma(dev, page)	((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
#define dma_to_virt(dev, addr)	((void *)__bus_to_virt(addr))
#define virt_to_dma(dev, addr)	((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
#else
#define page_to_dma(dev, page)	(__arch_page_to_dma(dev, page))
#define dma_to_virt(dev, addr)	(__arch_dma_to_virt(dev, addr))
#define virt_to_dma(dev, addr)	(__arch_virt_to_dma(dev, addr))
#endif

/*
 * Optional coherency support.  Currently used only by selected
 * Intel XSC3-based systems.
 */
#ifndef arch_is_coherent
#define arch_is_coherent()	0
#endif

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_ARM_MEMORY_H */