/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/sections.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
        struct percpu_data *pdata = __percpu_disguise(__pdata);

        kfree(pdata->ptrs[cpu]);
        pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
        int cpu;

        for_each_cpu_mask_nr(cpu, *mask)
                percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
        __percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with a zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
        struct percpu_data *pdata = __percpu_disguise(__pdata);
        int node = cpu_to_node(cpu);

        /*
         * We should make sure each CPU gets private memory.
         */
        size = roundup(size, cache_line_size());

        BUG_ON(pdata->ptrs[cpu]);
        if (node_online(node))
                pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
        else
                pdata->ptrs[cpu] = kzalloc(size, gfp);
        return pdata->ptrs[cpu];
}

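/*
 * Illustrative sketch, not part of this file: a cpu hotplug handler that
 * keeps a per-cpu object in step with CPUs coming and going, as the
 * kerneldoc above suggests, could be shaped roughly as below.  The names
 * "my_pdata" and "struct my_stats" are hypothetical, and because
 * percpu_populate()/percpu_depopulate() are static here, such a handler
 * would have to live in this file (or the helpers would need exporting).
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			if (!percpu_populate(my_pdata, sizeof(struct my_stats),
 *					     GFP_KERNEL, cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_DEAD:
 *			percpu_depopulate(my_pdata, cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 * registered at init time with hotcpu_notifier(my_cpu_callback, 0).
 */
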
/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
                                  cpumask_t *mask)
{
        cpumask_t populated;
        int cpu;

        cpus_clear(populated);
        for_each_cpu_mask_nr(cpu, *mask)
                if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
                        __percpu_depopulate_mask(__pdata, &populated);
                        return -ENOMEM;
                } else
                        cpu_set(cpu, populated);
        return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
        __percpu_populate_mask((__pdata), (size), (gfp), &(mask))

/**
 * alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
        /*
         * We allocate whole cache lines to avoid false sharing
         */
        size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
        void *pdata = kzalloc(sz, GFP_KERNEL);
        void *__pdata = __percpu_disguise(pdata);

        /*
         * Can't easily make larger alignment work with kmalloc.  WARN
         * on it.  Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);

        if (unlikely(!pdata))
                return NULL;
        if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
                                           &cpu_possible_map)))
                return __pdata;
        kfree(pdata);
        return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

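/*
 * Illustrative sketch of typical client usage.  alloc_percpu() is the
 * wrapper in <linux/percpu.h> that passes sizeof() and __alignof__() of
 * the type to __alloc_percpu(); "struct my_counter" is hypothetical.
 *
 *	struct my_counter { unsigned long hits; };
 *	struct my_counter *ctr;
 *	int cpu;
 *
 *	ctr = alloc_percpu(struct my_counter);
 *	if (!ctr)
 *		return -ENOMEM;
 *	for_each_online_cpu(cpu)
 *		per_cpu_ptr(ctr, cpu)->hits++;
 *	free_percpu(ctr);
 *
 * The per-cpu buffers come back zeroed, so no explicit initialization
 * of "hits" is needed before the increment.
 */
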
/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bitmask which per-cpu objects are to be freed.
 */
void free_percpu(void *__pdata)
{
        if (unlikely(!__pdata))
                return;
        __percpu_depopulate_mask(__pdata, cpu_possible_mask);
        kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);

/*
 * Generic percpu area setup.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
        unsigned long size, i;
        char *ptr;
        unsigned long nr_possible_cpus = num_possible_cpus();

        /* Copy section for each CPU (we discard the original) */
        size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
        ptr = alloc_bootmem_pages(size * nr_possible_cpus);

        for_each_possible_cpu(i) {
                __per_cpu_offset[i] = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
                ptr += size;
        }
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
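
/*
 * Sketch of how the offsets recorded above are consumed: a static per-cpu
 * variable is reached by shifting its link-time address in the original
 * per-cpu section by the boot-time offset stored in __per_cpu_offset[].
 * asm-generic/percpu.h does roughly:
 *
 *	#define per_cpu_offset(x) (__per_cpu_offset[x])
 *	#define per_cpu(var, cpu) \
 *		(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
 *
 * Exact macro names vary between kernel versions; this is meant only to
 * show why each CPU's copy is the original section plus its offset.
 */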