mmzone_32.h

/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */

#ifndef _ASM_X86_MMZONE_32_H
#define _ASM_X86_MMZONE_32_H

#include <asm/smp.h>

#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
#define NODE_DATA(nid)  (node_data[nid])

#include <asm/numaq.h>
/* summit or generic arch */
#include <asm/srat.h>

extern int get_memcfg_numa_flat(void);
/*
 * This allows any one NUMA architecture to be compiled
 * for, and still fall back to the flat function if it
 * fails.
 */
static inline void get_memcfg_numa(void)
{
        if (get_memcfg_numaq())
                return;
        if (get_memcfg_from_srat())
                return;
        get_memcfg_numa_flat();
}
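/*
 * Editorial note (not in the original header): detection in
 * get_memcfg_numa() runs in a fixed order, the NUMA-Q probe first and
 * then the ACPI SRAT parser; only if both report failure does the flat,
 * single-node fallback run.
 */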
extern int early_pfn_to_nid(unsigned long pfn);
extern void resume_map_numa_kva(pgd_t *pgd);

#else /* !CONFIG_NUMA */

#define get_memcfg_numa get_memcfg_numa_flat

static inline void resume_map_numa_kva(pgd_t *pgd) {}

#endif /* CONFIG_NUMA */
#ifdef CONFIG_DISCONTIGMEM

/*
 * generic node memory support, the following assumptions apply:
 *
 * 1) memory comes in 64MB contiguous chunks which are either present or not
 * 2) we will not have more than 64GB in total
 *
 * for now assume that 64GB is the max amount of RAM for the whole system
 *    64GB / 4096 bytes/page = 16777216 pages
 */
#define MAX_NR_PAGES 16777216
#define MAX_ELEMENTS 1024
#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
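/*
 * Worked numbers (editorial note, not in the original header):
 * PAGES_PER_ELEMENT = 16777216 / 1024 = 16384 pages; at 4096 bytes per
 * page that is 16384 * 4096 = 64MB, so each physnode_map[] entry below
 * describes exactly one 64MB chunk from assumption 1), and the 1024
 * entries together cover the assumed 64GB maximum.
 */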
extern s8 physnode_map[];

static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
        return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
#else
        return 0;
#endif
}
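/*
 * Editorial sketch (not part of the kernel header): a stand-alone,
 * user-space model of the physnode_map lookup above. The table layout
 * ("node 0 owns the first 128MB, node 1 the next 64MB, nothing else
 * present") is purely hypothetical; in the kernel the table is filled in
 * by the arch setup code.
 */
#if 0   /* example only */
#include <stdio.h>

#define DEMO_MAX_ELEMENTS       1024
#define DEMO_PAGES_PER_ELEMENT  16384   /* 64MB / 4096-byte pages */

/* -1 means "no node owns this 64MB chunk" */
static signed char demo_physnode_map[DEMO_MAX_ELEMENTS] = {
        [0] = 0, [1] = 0,                       /* 0-128MB   -> node 0 */
        [2] = 1,                                /* 128-192MB -> node 1 */
        [3 ... DEMO_MAX_ELEMENTS - 1] = -1,     /* not present */
};

static int demo_pfn_to_nid(unsigned long pfn)
{
        return demo_physnode_map[pfn / DEMO_PAGES_PER_ELEMENT];
}

int main(void)
{
        /* pfn 0x8000 is byte offset 128MB, the first page of chunk 2 */
        printf("pfn 0x8000  -> node %d\n", demo_pfn_to_nid(0x8000));
        /* pfn 0x10000 is byte offset 256MB, which no node claims */
        printf("pfn 0x10000 -> node %d\n", demo_pfn_to_nid(0x10000));
        return 0;
}
#endif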
/*
 * Following are macros that each numa implementation must define.
 */
#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)                                               \
({                                                                      \
        pg_data_t *__pgdat = NODE_DATA(nid);                            \
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
})
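/*
 * Editorial sketch (not in the original header): the two macros above
 * bound a node's pages as the half-open range
 * [node_start_pfn(nid), node_end_pfn(nid)), so a caller walking one
 * node's page frames would look roughly like this.
 */
#if 0   /* example only */
static void walk_node_pfns(int nid)
{
        unsigned long pfn;

        for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++) {
                /* ... do something with this page frame number ... */
        }
}
#endif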
static inline int pfn_valid(int pfn)
{
        int nid = pfn_to_nid(pfn);

        if (nid >= 0)
                return (pfn < node_end_pfn(nid));
        return 0;
}
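/*
 * Editorial note (not in the original header): pfn_to_nid() returns the
 * s8 physnode_map[] entry, so a 64MB chunk that no node claims is
 * expected to carry a negative value there; the nid >= 0 test above is
 * what makes pfn_valid() reject page frames in such holes.
 */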
#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_NEED_MULTIPLE_NODES

/*
 * Following are macros that are specific to this numa platform.
 */
#define reserve_bootmem(addr, size, flags) \
        reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
#define alloc_bootmem(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
        __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
                                __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
        __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
                                __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x)                                    \
({                                                                      \
        struct pglist_data  __maybe_unused                              \
                                *__alloc_bootmem_node__pgdat = (pgdat); \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,        \
                                __pa(MAX_DMA_ADDRESS));                 \
})
#define alloc_bootmem_pages_node(pgdat, x)                              \
({                                                                      \
        struct pglist_data  __maybe_unused                              \
                                *__alloc_bootmem_node__pgdat = (pgdat); \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,              \
                                __pa(MAX_DMA_ADDRESS));                 \
})
#define alloc_bootmem_low_pages_node(pgdat, x)                          \
({                                                                      \
        struct pglist_data  __maybe_unused                              \
                                *__alloc_bootmem_node__pgdat = (pgdat); \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);          \
})
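/*
 * Editorial note (not in the original header): every wrapper above routes
 * the allocation to NODE_DATA(0), so for example a hypothetical caller
 *
 *      void *p = alloc_bootmem_pages_node(NODE_DATA(2), PAGE_SIZE);
 *
 * expands to roughly
 *
 *      __alloc_bootmem_node(NODE_DATA(0), PAGE_SIZE, PAGE_SIZE,
 *                           __pa(MAX_DMA_ADDRESS));
 *
 * The requested pgdat is only captured in a __maybe_unused local, so its
 * side effects are preserved but it does not influence the allocation;
 * the memory itself always comes from node 0.
 */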
#endif /* CONFIG_NEED_MULTIPLE_NODES */

#endif /* _ASM_X86_MMZONE_32_H */