@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts. This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];
 
 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {
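
To illustrate why the added comment insists on freeing unused parts only after
all group allocations are complete: with a bottom-up allocator, a tail freed
from group 0 can satisfy group 1's allocation, leaving the two group areas
overlapping. The following userspace toy is a sketch of that hazard, not the
kernel allocator; toy_alloc(), toy_free() and all sizes are invented for the
demonstration.

/*
 * Toy model, not kernel code: a lowest-address-fit range allocator,
 * standing in for the bottom-up first-chunk allocation.
 */
#include <stdio.h>
#include <stddef.h>

#define UNIT 1024

static struct { size_t off, len; } free_list[8];
static int nfree;

static void toy_free(size_t off, size_t len)
{
	free_list[nfree].off = off;
	free_list[nfree].len = len;
	nfree++;
}

/* like a bottom-up bootmem allocator, prefer the lowest address that fits */
static long toy_alloc(size_t len)
{
	int i, best = -1;

	for (i = 0; i < nfree; i++)
		if (free_list[i].len >= len &&
		    (best < 0 || free_list[i].off < free_list[best].off))
			best = i;
	if (best < 0)
		return -1;
	free_list[best].off += len;
	free_list[best].len -= len;
	return (long)(free_list[best].off - len);
}

int main(void)
{
	long g0, g1;

	toy_free(0, 4 * UNIT);		/* the whole arena starts free */

	/* group 0: allocate 3 units, of which only the first 2 are used */
	g0 = toy_alloc(3 * UNIT);

	/* buggy ordering: free group 0's unused unit right away ... */
	toy_free((size_t)g0 + 2 * UNIT, UNIT);

	/* ... and group 1's allocation lands in that hole */
	g1 = toy_alloc(UNIT);

	printf("group0: [%ld, %ld)\n", g0, g0 + 3 * UNIT);
	printf("group1: [%ld, %ld)  -- inside group0's area\n",
	       g1, g1 + UNIT);
	return 0;
}

Deferring every toy_free() until after both toy_alloc() calls, as the hunk
above does for the real groups, keeps the two areas disjoint.
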
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
+	/* kmemleak tracks the percpu allocations separately */
+	kmemleak_free(fc);
 
 	ai->dyn_size = unit_size;
 	ai->unit_size = unit_size;
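
For context on the kmemleak_free(fc) call: kmemleak already tracks the block
backing the first chunk, while the allocations handed out from that chunk are
reported to kmemleak through the percpu API, so the backing region is dropped
from tracking to avoid double accounting. Below is a minimal kernel-style
sketch of the same handover pattern; the pool (pool_alloc(), POOL_SIZE) is
hypothetical, and only kmalloc(), kmemleak_alloc() and kmemleak_free() are
real kernel APIs.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

#define POOL_SIZE 4096		/* invented size for the sketch */

static char *pool_base;
static size_t pool_used;

/* hand out a piece of the pool and let kmemleak track it directly */
static void *pool_alloc(size_t size)
{
	void *p;

	if (pool_used + size > POOL_SIZE)
		return NULL;
	p = pool_base + pool_used;
	pool_used += size;
	kmemleak_alloc(p, size, 1, GFP_KERNEL);
	return p;
}

static int __init pool_demo_init(void)
{
	pool_base = kmalloc(POOL_SIZE, GFP_KERNEL);
	if (!pool_base)
		return -ENOMEM;
	/*
	 * The pool registers its own sub-allocations with kmemleak in
	 * pool_alloc(), so stop tracking the backing region -- the same
	 * reason the hunk above calls kmemleak_free(fc).
	 */
	kmemleak_free(pool_base);

	pool_alloc(64);
	return 0;
}
module_init(pool_demo_init);
MODULE_LICENSE("GPL");
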