/*
 * linux/arch/m32r/mm/discontig.c
 *
 * Discontig memory support
 *
 * Copyright (c) 2003 Hitoshi Yamamoto
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/pfn.h>

#include <asm/setup.h>

extern char _end[];

struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

pg_data_t m32r_node_data[MAX_NUMNODES];

/* Memory profile: one entry per node */
typedef struct {
        unsigned long start_pfn;        /* first PFN of the node */
        unsigned long pages;            /* node size in pages, holes included */
        unsigned long holes;            /* pages not backed by real memory */
        unsigned long free_pfn;         /* first PFN free for boot allocations */
} mem_prof_t;
static mem_prof_t mem_prof[MAX_NUMNODES];

static void __init mem_prof_init(void)
{
        unsigned long start_pfn, holes, free_pfn;
        const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
        unsigned long ul;
        mem_prof_t *mp;

        /* Node#0 SDRAM */
        mp = &mem_prof[0];
        mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
        mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
        mp->holes = 0;
        mp->free_pfn = PFN_UP(__pa(_end));      /* first page above the kernel image */

        /* Node#1 internal SRAM */
        mp = &mem_prof[1];
        start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
        holes = 0;
        if (start_pfn & (zone_alignment - 1)) {
                /*
                 * Round start_pfn down to a MAX_ORDER-aligned boundary;
                 * the padding pages below the SRAM are counted as holes.
                 */
                ul = zone_alignment;
                while (start_pfn >= ul)
                        ul += zone_alignment;
                start_pfn = ul - zone_alignment;
                holes = free_pfn - start_pfn;
        }

        mp->start_pfn = start_pfn;
        mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
        mp->holes = holes;
        mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
}

unsigned long __init setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long min_pfn;
        int nid;
        mem_prof_t *mp;

        max_low_pfn = 0;
        min_low_pfn = -1;

        mem_prof_init();

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                min_pfn = mp->start_pfn;
                max_pfn = mp->start_pfn + mp->pages;
                bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
                        mp->start_pfn, max_pfn);

                /* Hand the whole node to the bootmem allocator... */
                free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->pages));

                /*
                 * ...then reserve everything below free_pfn (already in use)
                 * plus the bootmem bitmap that was just placed at free_pfn.
                 */
                reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
                        BOOTMEM_DEFAULT);

                if (max_low_pfn < max_pfn)
                        max_low_pfn = max_pfn;

                if (min_low_pfn > min_pfn)
                        min_low_pfn = min_pfn;
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START,
                                INITRD_SIZE, BOOTMEM_DEFAULT);
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start + INITRD_SIZE;
                        printk("initrd:start[%08lx],size[%08lx]\n",
                                initrd_start, INITRD_SIZE);
                } else {
                        printk("initrd extends beyond end of memory "
                                "(0x%08lx > 0x%08llx)\ndisabling initrd\n",
                                INITRD_START + INITRD_SIZE,
                                (unsigned long long)PFN_PHYS(max_low_pfn));
                        initrd_start = 0;
                }
        }
#endif  /* CONFIG_BLK_DEV_INITRD */

        return max_low_pfn;
}

#define START_PFN(nid)          (NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid)        (NODE_DATA(nid)->bdata->node_low_pfn)

unsigned long __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
        unsigned long low, start_pfn;
        unsigned long holes = 0;
        int nid, i;
        mem_prof_t *mp;

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                for (i = 0 ; i < MAX_NR_ZONES ; i++) {
                        zones_size[i] = 0;
                        zholes_size[i] = 0;
                }

                /* Each node gets a single ZONE_DMA spanning the whole node. */
                start_pfn = START_PFN(nid);
                low = MAX_LOW_PFN(nid);
                zones_size[ZONE_DMA] = low - start_pfn;
                zholes_size[ZONE_DMA] = mp->holes;
                holes += zholes_size[ZONE_DMA];

                free_area_init_node(nid, zones_size, start_pfn, zholes_size);
        }

        /*
         * For test:
         *  allow all of the internal RAM (node 1) to be allocated by
         *  zeroing its zone watermarks; see __alloc_pages().
         */
        NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
        NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
        NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;

        return holes;
}