/*
 *	linux/mm/allocpercpu.c
 *
 *	Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}
EXPORT_SYMBOL_GPL(percpu_depopulate);
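
/*
 * Example (illustrative sketch, not part of this file): a typical caller
 * frees a dying CPU's buffer from a CPU hotplug notifier. "my_pdata" and
 * "my_cpu_callback" are hypothetical names; register_cpu_notifier() and
 * the CPU_DEAD action come from <linux/cpu.h> of this kernel generation:
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		if (action == CPU_DEAD)
 *			percpu_depopulate(my_pdata, (unsigned long)hcpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_nb);
 */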

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for CPUs selected through mask bits
 */
void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}
EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: allocation flags, e.g. GFP_KERNEL (may sleep) or GFP_ATOMIC
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * The per-cpu object is populated with a zeroed buffer.
 */
void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
EXPORT_SYMBOL_GPL(percpu_populate);
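
/*
 * Example (illustrative sketch): the populate half of the hypothetical
 * notifier above, run before a CPU comes online; returning NOTIFY_BAD on
 * allocation failure vetoes the bring-up. "struct my_stats" is assumed:
 *
 *	case CPU_UP_PREPARE:
 *		if (!percpu_populate(my_pdata, sizeof(struct my_stats),
 *				     GFP_KERNEL, (unsigned long)hcpu))
 *			return NOTIFY_BAD;
 *		break;
 */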

/**
 * percpu_populate_mask - populate per-cpu data for more CPUs
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: allocation flags, e.g. GFP_KERNEL (may sleep) or GFP_ATOMIC
 * @mask: populate per-cpu data for CPUs selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
			   cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			/* Roll back the CPUs populated so far */
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_populate_mask);
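
/*
 * Example (illustrative sketch): populate the hypothetical "my_pdata" for
 * every currently online CPU. The percpu_populate_mask() wrapper assumed
 * here is the <linux/percpu.h> macro that passes &cpu_online_map along:
 *
 *	if (percpu_populate_mask(my_pdata, sizeof(struct my_stats),
 *				 GFP_KERNEL, cpu_online_map))
 *		return -ENOMEM;
 */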

/**
 * percpu_alloc_mask - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @gfp: allocation flags, e.g. GFP_KERNEL (may sleep) or GFP_ATOMIC
 * @mask: populate per-cpu data for CPUs selected through mask bits
 *
 * Populating per-cpu data for all online CPUs would be a typical use case,
 * which is simplified by the percpu_alloc() wrapper.
 * Per-cpu objects are populated with zeroed buffers.
 */
void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, gfp);
	void *__pdata = __percpu_disguise(pdata);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
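
/*
 * Example (illustrative sketch): initial setup through the percpu_alloc()
 * wrapper named in the kernel-doc above, followed by access to this CPU's
 * zeroed copy. "struct my_stats" and its "count" field are hypothetical;
 * percpu_ptr() is assumed to be the accessor macro from <linux/percpu.h>:
 *
 *	struct my_stats *stats;
 *
 *	stats = percpu_alloc(sizeof(struct my_stats), GFP_KERNEL);
 *	if (!stats)
 *		return -ENOMEM;
 *	percpu_ptr(stats, smp_processor_id())->count++;
 */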

/**
 * percpu_free - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to be freed.
 */
void percpu_free(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(percpu_free);
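
/*
 * Example (illustrative sketch): final cleanup of the hypothetical "stats"
 * object from the example above, e.g. from a module's exit path; no mask
 * is needed since every remaining per-cpu buffer is freed:
 *
 *	percpu_free(stats);
 *	stats = NULL;
 */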