/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
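
/*
 * The struct percpu_data / __percpu_disguise() pair used below comes from
 * <linux/percpu.h>: percpu_data holds one pointer per possible cpu, and
 * __percpu_disguise() reversibly mangles the pointer value so the opaque
 * handle returned to callers cannot be dereferenced directly.  per_cpu_ptr()
 * undoes the mangling and indexes ptrs[cpu].
 */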

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for the CPUs selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int cpu;
	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}

/**
 * percpu_populate_mask - populate per-cpu data for more CPUs
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for the CPUs selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
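	/*
	 * Track the CPUs populated so far, so a failed allocation can be
	 * rolled back before reporting -ENOMEM.
	 */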
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))

/**
 * alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area. Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to free.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
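
/*
 * Usage sketch (not part of allocpercpu.c): callers normally go through the
 * alloc_percpu()/per_cpu_ptr()/free_percpu() helpers from <linux/percpu.h>,
 * which route to __alloc_percpu() above.  The structure and function names
 * below are hypothetical and shown for illustration only.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

struct example_stats {
	unsigned long hits;
};

static struct example_stats *example_stats;

static int example_setup(void)
{
	/* One cache-line-padded, zeroed buffer per possible CPU. */
	example_stats = alloc_percpu(struct example_stats);
	if (!example_stats)
		return -ENOMEM;
	return 0;
}

static void example_hit(void)
{
	int cpu = get_cpu();	/* disable preemption while touching our copy */

	per_cpu_ptr(example_stats, cpu)->hits++;
	put_cpu();
}

static unsigned long example_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Sum the per-cpu counters; no locking, so the total is approximate. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(example_stats, cpu)->hits;
	return sum;
}

static void example_teardown(void)
{
	free_percpu(example_stats);
	example_stats = NULL;
}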