  1. /*
  2. * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
  3. *
  4. * PowerPC64 port:
  5. * Copyright (C) 2002 Anton Blanchard, IBM Corp.
  6. */
  7. #ifndef _ASM_MMZONE_H_
  8. #define _ASM_MMZONE_H_
  9. #include <linux/config.h>
  10. #include <asm/smp.h>
  11. /* generic non-linear memory support:
  12. *
  13. * 1) we will not split memory into more chunks than will fit into the
  14. * flags field of the struct page
  15. */
  16. #ifdef CONFIG_NEED_MULTIPLE_NODES
  17. extern struct pglist_data *node_data[];
  18. /*
  19. * Return a pointer to the node data for node n.
  20. */
  21. #define NODE_DATA(nid) (node_data[nid])
  22. /*
  23. * Following are specific to this numa platform.
  24. */
  25. extern int numa_cpu_lookup_table[];
  26. extern char *numa_memory_lookup_table;
  27. extern cpumask_t numa_cpumask_lookup_table[];
  28. #ifdef CONFIG_MEMORY_HOTPLUG
  29. extern unsigned long max_pfn;
  30. #endif
  31. /* 16MB regions */
  32. #define MEMORY_INCREMENT_SHIFT 24
  33. #define MEMORY_INCREMENT (1UL << MEMORY_INCREMENT_SHIFT)
  34. /* NUMA debugging, will not work on a DLPAR machine */
  35. #undef DEBUG_NUMA
  36. static inline int pa_to_nid(unsigned long pa)
  37. {
  38. int nid;
  39. #ifdef CONFIG_MEMORY_HOTPLUG
  40. /* kludge hot added sections default to node 0 */
  41. if (pa >= (max_pfn << PAGE_SHIFT))
  42. return 0;
  43. #endif
  44. nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
  45. #ifdef DEBUG_NUMA
  46. /* the physical address passed in is not in the map for the system */
  47. if (nid == -1) {
  48. printk("bad address: %lx\n", pa);
  49. BUG();
  50. }
  51. #endif
  52. return nid;
  53. }
  54. /*
  55. * Following are macros that each numa implmentation must define.
  56. */
  57. #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
  58. #define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
  59. #ifdef CONFIG_DISCONTIGMEM
  60. #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
  61. #define pfn_to_nid(pfn) pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
  62. /* Written this way to avoid evaluating arguments twice */
  63. #define discontigmem_pfn_to_page(pfn) \
  64. ({ \
  65. unsigned long __tmp = pfn; \
  66. (NODE_DATA(pfn_to_nid(__tmp))->node_mem_map + \
  67. node_localnr(__tmp, pfn_to_nid(__tmp))); \
  68. })
  69. #define discontigmem_page_to_pfn(p) \
  70. ({ \
  71. struct page *__tmp = p; \
  72. (((__tmp) - page_zone(__tmp)->zone_mem_map) + \
  73. page_zone(__tmp)->zone_start_pfn); \
  74. })
  75. /* XXX fix for discontiguous physical memory */
  76. #define discontigmem_pfn_valid(pfn) ((pfn) < num_physpages)
  77. #endif /* CONFIG_DISCONTIGMEM */
  78. #endif /* CONFIG_NEED_MULTIPLE_NODES */
  79. #ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  80. #define early_pfn_to_nid(pfn) pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT)
  81. #endif
  82. #endif /* _ASM_MMZONE_H_ */