@@ -70,6 +70,7 @@ struct {
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -357,6 +358,21 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+        if (end <= dma_pfn)
+                zholes_size[ZONE_DMA] += end - start + 1;
+        else if (start > dma_pfn)
+                zholes_size[ZONE_NORMAL] += end - start + 1;
+        else {
+                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+                zholes_size[ZONE_NORMAL] += end - dma_pfn;
+        }
+}
+
 static int __init early_parse_mem(char *p)
 {
         memory_end = memparse(p, &p);
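The new add_memory_hole() only accumulates per-zone hole sizes in zholes_size[]; nothing in this hunk consumes the array, that happens when the zone sizes are set up elsewhere in the architecture code. As a stand-alone sketch of the splitting arithmetic only (user-space code, not part of the patch; the 2 GB DMA limit and 4 KB page size are assumptions for illustration), a hole straddling the DMA boundary is divided between ZONE_DMA and ZONE_NORMAL like this:

/*
 * Stand-alone illustration of add_memory_hole()'s zone split, runnable in
 * user space.  MAX_DMA_PFN and PAGE_SHIFT are illustrative assumptions,
 * not values taken from this patch.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define MAX_DMA_PFN     (0x80000000UL >> PAGE_SHIFT)    /* assumed 2 GB DMA limit */

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

static unsigned long zholes_size[MAX_NR_ZONES];

/* Same logic as the patched add_memory_hole(): start/end are inclusive pfns. */
static void add_memory_hole(unsigned long start, unsigned long end)
{
        unsigned long dma_pfn = MAX_DMA_PFN;

        if (end <= dma_pfn)
                zholes_size[ZONE_DMA] += end - start + 1;
        else if (start > dma_pfn)
                zholes_size[ZONE_NORMAL] += end - start + 1;
        else {
                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
                zholes_size[ZONE_NORMAL] += end - dma_pfn;
        }
}

int main(void)
{
        /* A 1 GB hole from 1.5 GB to 2.5 GB crosses the DMA boundary. */
        add_memory_hole(0x60000000UL >> PAGE_SHIFT,
                        (0xA0000000UL >> PAGE_SHIFT) - 1);
        printf("ZONE_DMA hole pages:    %lu\n", zholes_size[ZONE_DMA]);
        printf("ZONE_NORMAL hole pages: %lu\n", zholes_size[ZONE_NORMAL]);
        return 0;
}

The two counts always sum to end - start + 1, so every page of the hole is attributed to exactly one zone.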
@@ -434,7 +450,7 @@ setup_lowcore(void)
                 lc->extended_save_area_addr = (__u32)
                         __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                 /* enable extended save area */
-                ctl_set_bit(14, 29);
+                __ctl_set_bit(14, 29);
         }
 #endif
         set_prefix((u32)(unsigned long) lc);
@@ -478,6 +494,7 @@ setup_memory(void)
 {
         unsigned long bootmap_size;
         unsigned long start_pfn, end_pfn, init_pfn;
+        unsigned long last_rw_end;
         int i;
 
         /*
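The registration loop in the next hunk turns each CHUNK_READ_WRITE area from a byte range into a half-open pfn range: the chunk start is rounded up to a page boundary, the end rounded down, and both are clamped to [start_pfn, end_pfn) before the pages get their storage key and are handed to free_bootmem(). A minimal user-space sketch of just that conversion (the addresses and the 4 KB page size below are made-up values for illustration):

/* Stand-alone sketch of the chunk-to-pfn conversion used in the loop below. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

int main(void)
{
        /* Hypothetical, unaligned memory chunk and pfn limits. */
        unsigned long addr = 0x100800, size = 0x200400;
        unsigned long start_pfn = 0x80, end_pfn = 0x400;
        unsigned long start_chunk, end_chunk;

        start_chunk = (addr + PAGE_SIZE - 1) >> PAGE_SHIFT;     /* round start up */
        end_chunk = (addr + size) >> PAGE_SHIFT;                /* round end down */
        if (start_chunk < start_pfn)
                start_chunk = start_pfn;
        if (end_chunk > end_pfn)
                end_chunk = end_pfn;
        if (start_chunk < end_chunk)
                printf("usable pfn range: [%#lx, %#lx)\n",
                       start_chunk, end_chunk);
        return 0;
}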
@@ -533,27 +550,39 @@ setup_memory(void)
         /*
          * Register RAM areas with the bootmem allocator.
          */
+        last_rw_end = start_pfn;
 
         for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-                unsigned long start_chunk, end_chunk, pfn;
+                unsigned long start_chunk, end_chunk;
 
                 if (memory_chunk[i].type != CHUNK_READ_WRITE)
                         continue;
-                start_chunk = PFN_DOWN(memory_chunk[i].addr);
-                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-                end_chunk = min(end_chunk, end_pfn);
-                if (start_chunk >= end_chunk)
-                        continue;
-                add_active_range(0, start_chunk, end_chunk);
-                pfn = max(start_chunk, start_pfn);
-                for (; pfn <= end_chunk; pfn++)
-                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+                start_chunk >>= PAGE_SHIFT;
+                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+                end_chunk >>= PAGE_SHIFT;
+                if (start_chunk < start_pfn)
+                        start_chunk = start_pfn;
+                if (end_chunk > end_pfn)
+                        end_chunk = end_pfn;
+                if (start_chunk < end_chunk) {
+                        /* Initialize storage key for RAM pages */
+                        for (init_pfn = start_chunk ; init_pfn < end_chunk;
+                             init_pfn++)
+                                page_set_storage_key(init_pfn << PAGE_SHIFT,
+                                                     PAGE_DEFAULT_KEY);
+                        free_bootmem(start_chunk << PAGE_SHIFT,
+                                     (end_chunk - start_chunk) << PAGE_SHIFT);
+                        if (last_rw_end < start_chunk)
+                                add_memory_hole(last_rw_end, start_chunk - 1);
+                        last_rw_end = end_chunk;
+                }
         }
 
         psw_set_key(PAGE_DEFAULT_KEY);
 
-        free_bootmem_with_active_regions(0, max_pfn);
-        reserve_bootmem(0, PFN_PHYS(start_pfn));
+        if (last_rw_end < end_pfn - 1)
+                add_memory_hole(last_rw_end, end_pfn - 1);
 
         /*
          * Reserve the bootmem bitmap itself as well. We do this in two