allocpercpu.c

/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
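
/*
 * Reference, not part of this file: in the matching <linux/percpu.h>, the
 * pointer handed out to callers is the real pointer with all bits
 * inverted, so an accidental direct dereference of the "disguised"
 * pointer faults loudly instead of silently hitting the per-cpu pointer
 * array:
 *
 *	struct percpu_data {
 *		void *ptrs[1];
 *	};
 *
 *	#define __percpu_disguise(pdata) \
 *		(struct percpu_data *)~(unsigned long)(pdata)
 */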

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}
EXPORT_SYMBOL_GPL(percpu_depopulate);

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}
EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
EXPORT_SYMBOL_GPL(percpu_populate);
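
/*
 * Example, not part of the original file: a minimal sketch of the cpu
 * hotplug handler mentioned in the comments above, using the 2.6-era
 * notifier API. "struct my_stats", "my_pdata" and the my_cpu_* names are
 * hypothetical; my_pdata is assumed to come from percpu_alloc().
 */
#include <linux/cpu.h>
#include <linux/notifier.h>

struct my_stats {
	unsigned long events;
};

static struct my_stats *my_pdata;	/* from percpu_alloc() */

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* allocate before the new cpu starts running */
		if (!percpu_populate(my_pdata, sizeof(struct my_stats),
				     GFP_KERNEL, cpu))
			return NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* free once the cpu can no longer touch its copy */
		percpu_depopulate(my_pdata, cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};
/* registered at init time with register_cpu_notifier(&my_cpu_notifier) */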

/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
			   cpumask_t *mask)
{
	cpumask_t populated = CPU_MASK_NONE;
	int cpu;

	for_each_cpu_mask(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			/* undo the cpus populated so far, then fail */
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_populate_mask);

/**
 * percpu_alloc_mask - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Populating per-cpu data for all online cpus would be a typical use case,
 * which is simplified by the percpu_alloc() wrapper.
 * Per-cpu objects are populated with zeroed buffers.
 */
void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, gfp);
	void *__pdata = __percpu_disguise(pdata);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
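
/*
 * Example, not part of the original file: the typical lifecycle of an
 * object managed by this allocator, through the percpu_alloc(),
 * percpu_free() and per_cpu_ptr() wrappers from <linux/percpu.h>.
 * "struct my_counter" and the my_counters_* functions are hypothetical
 * names used only for illustration.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_counter {
	long hits;
};

static struct my_counter *counters;

static int my_counters_init(void)
{
	/* allocates the pointer array plus a zeroed, cache-line-aligned
	 * object for each online cpu */
	counters = percpu_alloc(sizeof(struct my_counter), GFP_KERNEL);
	if (!counters)
		return -ENOMEM;
	return 0;
}

static void my_counters_hit(void)
{
	int cpu = get_cpu();	/* pin to this cpu while touching its copy */

	per_cpu_ptr(counters, cpu)->hits++;
	put_cpu();
}

static void my_counters_exit(void)
{
	/* depopulates every remaining per-cpu object, then frees the array */
	percpu_free(counters);
}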

/**
 * percpu_free - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to free.
 */
void percpu_free(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(percpu_free);