mmzone.h

/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

#ifdef CONFIG_DISCONTIGMEM

extern struct pglist_data *node_data[];
#define NODE_DATA(nid)		(node_data[nid])

#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;	\
})
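
/*
 * Worked example (illustrative only): for a node whose memory spans pfns
 * 0x1000..0x1fff, node_start_pfn(nid) is 0x1000, node_spanned_pages is
 * 0x1000, and node_end_pfn(nid) is 0x1000 + 0x1000 - 1 == 0x1fff, i.e.
 * the last pfn of the node, inclusive. node_localnr() then maps a pfn
 * back to its node-relative index: node_localnr(0x1234, nid) == 0x234.
 */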

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
 * pfn_valid should be made as fast as possible, and the current definition
 * is valid for machines that are NUMA, but still contiguous, which is what
 * is currently supported. A more generalised, but slower definition would
 * be something like this - mbligh:
 * ( pfn_to_pgdat(pfn) && ((pfn) < node_end_pfn(pfn_to_nid(pfn))) )
 */
#if 1	/* M32R_FIXME */
#define pfn_valid(pfn)	(1)
#else
#define pfn_valid(pfn)	((pfn) < num_physpages)
#endif
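
/*
 * Sketch (hypothetical, not enabled here) of the generalised check
 * suggested above. Since node_end_pfn() in this file is inclusive,
 * <= is the matching comparison; it also assumes pfn_to_nid() returns
 * a valid node for the pfn (see the note on pfn_to_nid() below):
 *
 *	#define pfn_valid(pfn) \
 *		(pfn_to_pgdat(pfn) && ((pfn) <= node_end_pfn(pfn_to_nid(pfn))))
 */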

/*
 * generic node memory support, the following assumptions apply:
 * - each node covers a contiguous, non-overlapping range of pfns,
 *   bounded by node_start_pfn() and node_end_pfn() (inclusive)
 */

static __inline__ int pfn_to_nid(unsigned long pfn)
{
	int node;

	for (node = 0 ; node < MAX_NUMNODES ; node++)
		if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
			break;

	return node;
}
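
/*
 * Note: pfn_to_nid() is a linear scan over all possible nodes; if the
 * pfn lies in no node's span, the loop falls through and MAX_NUMNODES
 * is returned, so callers are expected to pass pfns known to be covered
 * by some node.
 */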

static __inline__ struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
	return NODE_DATA(pfn_to_nid(pfn));
}
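
/*
 * Example (illustrative only): given a pfn assumed to lie inside some
 * node's span, the helpers above compose as follows -
 *
 *	int nid = pfn_to_nid(pfn);
 *	pg_data_t *pgdat = pfn_to_pgdat(pfn);
 *	unsigned long idx = node_localnr(pfn, nid);
 *
 * where idx is the pfn's offset from the start of its node.
 */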

#endif	/* CONFIG_DISCONTIGMEM */
#endif	/* _ASM_MMZONE_H_ */