discontig.c

/*
 * linux/arch/m32r/mm/discontig.c
 *
 * Discontig memory support
 *
 * Copyright (c) 2003 Hitoshi Yamamoto
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/setup.h>

extern char _end[];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;

pg_data_t m32r_node_data[MAX_NUMNODES];

/* Memory profile */
typedef struct {
        unsigned long start_pfn;
        unsigned long pages;
        unsigned long holes;
        unsigned long free_pfn;
} mem_prof_t;
static mem_prof_t mem_prof[MAX_NUMNODES];
static void __init mem_prof_init(void)
{
        unsigned long start_pfn, holes, free_pfn;
        const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
        unsigned long ul;
        mem_prof_t *mp;

        /* Node#0 SDRAM */
        mp = &mem_prof[0];
        mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
        mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
        mp->holes = 0;
        mp->free_pfn = PFN_UP(__pa(_end));

        /* Node#1 internal SRAM */
        mp = &mem_prof[1];
        start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
        holes = 0;
        if (start_pfn & (zone_alignment - 1)) {
                /*
                 * Round start_pfn down to the nearest zone_alignment
                 * boundary; the skipped page frames are counted as holes.
                 */
                ul = zone_alignment;
                while (start_pfn >= ul)
                        ul += zone_alignment;
                start_pfn = ul - zone_alignment;
                holes = free_pfn - start_pfn;
        }

        mp->start_pfn = start_pfn;
        mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
        mp->holes = holes;
        mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
}
unsigned long __init setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long min_pfn;
        int nid;
        mem_prof_t *mp;

        max_low_pfn = 0;
        min_low_pfn = -1;

        mem_prof_init();

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
                NODE_DATA(nid)->bdata = &node_bdata[nid];
                min_pfn = mp->start_pfn;
                max_pfn = mp->start_pfn + mp->pages;
                bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
                        mp->start_pfn, max_pfn);

                /* Make every page of the node available to bootmem... */
                free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->pages));

                /*
                 * ...then take back everything below free_pfn (kernel image
                 * or alignment hole) plus the bootmem bitmap itself.
                 */
                reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);

                if (max_low_pfn < max_pfn)
                        max_low_pfn = max_pfn;

                if (min_low_pfn > min_pfn)
                        min_low_pfn = min_pfn;
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START,
                                INITRD_SIZE);
                        initrd_start = INITRD_START ?
                                INITRD_START + PAGE_OFFSET : 0;
                        initrd_end = initrd_start + INITRD_SIZE;
                        printk("initrd:start[%08lx],size[%08lx]\n",
                                initrd_start, INITRD_SIZE);
                } else {
                        printk("initrd extends beyond end of memory "
                                "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                INITRD_START + INITRD_SIZE,
                                PFN_PHYS(max_low_pfn));
                        initrd_start = 0;
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD */

        return max_low_pfn;
}

#define START_PFN(nid) \
        (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
unsigned long __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
        unsigned long low, start_pfn;
        unsigned long holes = 0;
        int nid, i;
        mem_prof_t *mp;

        /* Chain the online nodes into pgdat_list, highest nid first. */
        pgdat_list = NULL;
        for (nid = num_online_nodes() - 1 ; nid >= 0 ; nid--) {
                NODE_DATA(nid)->pgdat_next = pgdat_list;
                pgdat_list = NODE_DATA(nid);
        }

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                for (i = 0 ; i < MAX_NR_ZONES ; i++) {
                        zones_size[i] = 0;
                        zholes_size[i] = 0;
                }
                start_pfn = START_PFN(nid);
                low = MAX_LOW_PFN(nid);
                zones_size[ZONE_DMA] = low - start_pfn;
                zholes_size[ZONE_DMA] = mp->holes;
                holes += zholes_size[ZONE_DMA];

                free_area_init_node(nid, NODE_DATA(nid), zones_size,
                        start_pfn, zholes_size);
        }

        /*
         * For testing: let the whole internal RAM zone be used,
         * see __alloc_pages().
         */
        NODE_DATA(1)->node_zones->pages_min = 0;
        NODE_DATA(1)->node_zones->pages_low = 0;
        NODE_DATA(1)->node_zones->pages_high = 0;

        return holes;
}
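
A note on the alignment logic in mem_prof_init(): the while loop rounds the internal-SRAM start_pfn down to a zone_alignment boundary and records the skipped page frames as holes. The standalone sketch below is not part of the kernel file; MAX_ORDER and the sample PFN are made-up stand-ins, used only to show that the loop is equivalent to masking off the low bits of a power-of-two alignment.

#include <assert.h>
#include <stdio.h>

#define MAX_ORDER 11                    /* assumed value, illustration only */

static const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);

/* The round-down written the same way mem_prof_init() writes it. */
static unsigned long align_down_by_loop(unsigned long start_pfn)
{
        unsigned long ul = zone_alignment;

        if (!(start_pfn & (zone_alignment - 1)))
                return start_pfn;       /* already aligned, nothing to do */
        while (start_pfn >= ul)
                ul += zone_alignment;
        return ul - zone_alignment;
}

int main(void)
{
        unsigned long free_pfn = 0x1f123;   /* made-up PFN_UP(CONFIG_IRAM_START) */
        unsigned long start_pfn = align_down_by_loop(free_pfn);

        /* Same result as masking, because zone_alignment is a power of two. */
        assert(start_pfn == (free_pfn & ~(zone_alignment - 1)));
        printf("start_pfn=%#lx, holes=%lu pages\n",
                start_pfn, free_pfn - start_pfn);
        return 0;
}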