percpu.c 61 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071
  1. /*
  2. * linux/mm/percpu.c - percpu memory allocator
  3. *
  4. * Copyright (C) 2009 SUSE Linux Products GmbH
  5. * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
  6. *
  7. * This file is released under the GPLv2.
  8. *
  9. * This is percpu allocator which can handle both static and dynamic
  10. * areas. Percpu areas are allocated in chunks in vmalloc area. Each
  11. * chunk is consisted of boot-time determined number of units and the
  12. * first chunk is used for static percpu variables in the kernel image
  13. * (special boot time alloc/init handling necessary as these areas
  14. * need to be brought up before allocation services are running).
  15. * Unit grows as necessary and all units grow or shrink in unison.
  16. * When a chunk is filled up, another chunk is allocated. ie. in
  17. * vmalloc area
  18. *
  19. * c0 c1 c2
  20. * ------------------- ------------------- ------------
  21. * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
  22. * ------------------- ...... ------------------- .... ------------
  23. *
  24. * Allocation is done in offset-size areas of single unit space. Ie,
  25. * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  26. * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to
  27. * cpus. On NUMA, the mapping can be non-linear and even sparse.
  28. * Percpu access can be done by configuring percpu base registers
  29. * according to cpu to unit mapping and pcpu_unit_size.
  30. *
  31. * There are usually many small percpu allocations many of them being
  32. * as small as 4 bytes. The allocator organizes chunks into lists
  33. * according to free size and tries to allocate from the fullest one.
  34. * Each chunk keeps the maximum contiguous area size hint which is
  35. * guaranteed to be eqaul to or larger than the maximum contiguous
  36. * area in the chunk. This helps the allocator not to iterate the
  37. * chunk maps unnecessarily.
  38. *
  39. * Allocation state in each chunk is kept using an array of integers
  40. * on chunk->map. A positive value in the map represents a free
  41. * region and negative allocated. Allocation inside a chunk is done
  42. * by scanning this map sequentially and serving the first matching
  43. * entry. This is mostly copied from the percpu_modalloc() allocator.
  44. * Chunks can be determined from the address using the index field
  45. * in the page struct. The index field contains a pointer to the chunk.
  46. *
  47. * To use this allocator, arch code should do the followings.
  48. *
  49. * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
  50. *
  51. * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  52. * regular address to percpu pointer and back if they need to be
  53. * different from the default
  54. *
  55. * - use pcpu_setup_first_chunk() during percpu area initialization to
  56. * setup the first chunk containing the kernel static percpu area
  57. */
  58. #include <linux/bitmap.h>
  59. #include <linux/bootmem.h>
  60. #include <linux/err.h>
  61. #include <linux/list.h>
  62. #include <linux/log2.h>
  63. #include <linux/mm.h>
  64. #include <linux/module.h>
  65. #include <linux/mutex.h>
  66. #include <linux/percpu.h>
  67. #include <linux/pfn.h>
  68. #include <linux/slab.h>
  69. #include <linux/spinlock.h>
  70. #include <linux/vmalloc.h>
  71. #include <linux/workqueue.h>
  72. #include <asm/cacheflush.h>
  73. #include <asm/sections.h>
  74. #include <asm/tlbflush.h>
  75. #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
  76. #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
  77. /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  78. #ifndef __addr_to_pcpu_ptr
  79. #define __addr_to_pcpu_ptr(addr) \
  80. (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
  81. + (unsigned long)__per_cpu_start)
  82. #endif
  83. #ifndef __pcpu_ptr_to_addr
  84. #define __pcpu_ptr_to_addr(ptr) \
  85. (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
  86. - (unsigned long)__per_cpu_start)
  87. #endif
  88. struct pcpu_chunk {
  89. struct list_head list; /* linked to pcpu_slot lists */
  90. int free_size; /* free bytes in the chunk */
  91. int contig_hint; /* max contiguous size hint */
  92. void *base_addr; /* base address of this chunk */
  93. int map_used; /* # of map entries used */
  94. int map_alloc; /* # of map entries allocated */
  95. int *map; /* allocation map */
  96. struct vm_struct **vms; /* mapped vmalloc regions */
  97. bool immutable; /* no [de]population allowed */
  98. unsigned long populated[]; /* populated bitmap */
  99. };
  100. static int pcpu_unit_pages __read_mostly;
  101. static int pcpu_unit_size __read_mostly;
  102. static int pcpu_nr_units __read_mostly;
  103. static int pcpu_atom_size __read_mostly;
  104. static int pcpu_nr_slots __read_mostly;
  105. static size_t pcpu_chunk_struct_size __read_mostly;
  106. /* cpus with the lowest and highest unit numbers */
  107. static unsigned int pcpu_first_unit_cpu __read_mostly;
  108. static unsigned int pcpu_last_unit_cpu __read_mostly;
  109. /* the address of the first chunk which starts with the kernel static area */
  110. void *pcpu_base_addr __read_mostly;
  111. EXPORT_SYMBOL_GPL(pcpu_base_addr);
  112. static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
  113. const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
  114. /* group information, used for vm allocation */
  115. static int pcpu_nr_groups __read_mostly;
  116. static const unsigned long *pcpu_group_offsets __read_mostly;
  117. static const size_t *pcpu_group_sizes __read_mostly;
  118. /*
  119. * The first chunk which always exists. Note that unlike other
  120. * chunks, this one can be allocated and mapped in several different
  121. * ways and thus often doesn't live in the vmalloc area.
  122. */
  123. static struct pcpu_chunk *pcpu_first_chunk;
  124. /*
  125. * Optional reserved chunk. This chunk reserves part of the first
  126. * chunk and serves it for reserved allocations. The amount of
  127. * reserved offset is in pcpu_reserved_chunk_limit. When reserved
  128. * area doesn't exist, the following variables contain NULL and 0
  129. * respectively.
  130. */
  131. static struct pcpu_chunk *pcpu_reserved_chunk;
  132. static int pcpu_reserved_chunk_limit;
  133. /*
  134. * Synchronization rules.
  135. *
  136. * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
  137. * protects allocation/reclaim paths, chunks, populated bitmap and
  138. * vmalloc mapping. The latter is a spinlock and protects the index
  139. * data structures - chunk slots, chunks and area maps in chunks.
  140. *
  141. * During allocation, pcpu_alloc_mutex is kept locked all the time and
  142. * pcpu_lock is grabbed and released as necessary. All actual memory
  143. * allocations are done using GFP_KERNEL with pcpu_lock released. In
  144. * general, percpu memory can't be allocated with irq off but
  145. * irqsave/restore are still used in alloc path so that it can be used
  146. * from early init path - sched_init() specifically.
  147. *
  148. * Free path accesses and alters only the index data structures, so it
  149. * can be safely called from atomic context. When memory needs to be
  150. * returned to the system, free path schedules reclaim_work which
  151. * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
  152. * reclaimed, release both locks and frees the chunks. Note that it's
  153. * necessary to grab both locks to remove a chunk from circulation as
  154. * allocation path might be referencing the chunk with only
  155. * pcpu_alloc_mutex locked.
  156. */
  157. static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
  158. static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
  159. static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  160. /* reclaim work to release fully free chunks, scheduled from free path */
  161. static void pcpu_reclaim(struct work_struct *work);
  162. static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
  163. static int __pcpu_size_to_slot(int size)
  164. {
  165. int highbit = fls(size); /* size is in bytes */
  166. return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
  167. }
  168. static int pcpu_size_to_slot(int size)
  169. {
  170. if (size == pcpu_unit_size)
  171. return pcpu_nr_slots - 1;
  172. return __pcpu_size_to_slot(size);
  173. }
  174. static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
  175. {
  176. if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
  177. return 0;
  178. return pcpu_size_to_slot(chunk->free_size);
  179. }
  180. static int pcpu_page_idx(unsigned int cpu, int page_idx)
  181. {
  182. return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
  183. }
  184. static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
  185. unsigned int cpu, int page_idx)
  186. {
  187. return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
  188. (page_idx << PAGE_SHIFT);
  189. }
  190. static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
  191. unsigned int cpu, int page_idx)
  192. {
  193. /* must not be used on pre-mapped chunk */
  194. WARN_ON(chunk->immutable);
  195. return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
  196. }
  197. /* set the pointer to a chunk in a page struct */
  198. static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
  199. {
  200. page->index = (unsigned long)pcpu;
  201. }
  202. /* obtain pointer to a chunk from a page struct */
  203. static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
  204. {
  205. return (struct pcpu_chunk *)page->index;
  206. }
  207. static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
  208. {
  209. *rs = find_next_zero_bit(chunk->populated, end, *rs);
  210. *re = find_next_bit(chunk->populated, end, *rs + 1);
  211. }
  212. static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
  213. {
  214. *rs = find_next_bit(chunk->populated, end, *rs);
  215. *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
  216. }
  217. /*
  218. * (Un)populated page region iterators. Iterate over (un)populated
  219. * page regions betwen @start and @end in @chunk. @rs and @re should
  220. * be integer variables and will be set to start and end page index of
  221. * the current region.
  222. */
  223. #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
  224. for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
  225. (rs) < (re); \
  226. (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
  227. #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
  228. for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
  229. (rs) < (re); \
  230. (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
  231. /**
  232. * pcpu_mem_alloc - allocate memory
  233. * @size: bytes to allocate
  234. *
  235. * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
  236. * kzalloc() is used; otherwise, vmalloc() is used. The returned
  237. * memory is always zeroed.
  238. *
  239. * CONTEXT:
  240. * Does GFP_KERNEL allocation.
  241. *
  242. * RETURNS:
  243. * Pointer to the allocated area on success, NULL on failure.
  244. */
  245. static void *pcpu_mem_alloc(size_t size)
  246. {
  247. if (size <= PAGE_SIZE)
  248. return kzalloc(size, GFP_KERNEL);
  249. else {
  250. void *ptr = vmalloc(size);
  251. if (ptr)
  252. memset(ptr, 0, size);
  253. return ptr;
  254. }
  255. }
  256. /**
  257. * pcpu_mem_free - free memory
  258. * @ptr: memory to free
  259. * @size: size of the area
  260. *
  261. * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
  262. */
  263. static void pcpu_mem_free(void *ptr, size_t size)
  264. {
  265. if (size <= PAGE_SIZE)
  266. kfree(ptr);
  267. else
  268. vfree(ptr);
  269. }
  270. /**
  271. * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
  272. * @chunk: chunk of interest
  273. * @oslot: the previous slot it was on
  274. *
  275. * This function is called after an allocation or free changed @chunk.
  276. * New slot according to the changed state is determined and @chunk is
  277. * moved to the slot. Note that the reserved chunk is never put on
  278. * chunk slots.
  279. *
  280. * CONTEXT:
  281. * pcpu_lock.
  282. */
  283. static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  284. {
  285. int nslot = pcpu_chunk_slot(chunk);
  286. if (chunk != pcpu_reserved_chunk && oslot != nslot) {
  287. if (oslot < nslot)
  288. list_move(&chunk->list, &pcpu_slot[nslot]);
  289. else
  290. list_move_tail(&chunk->list, &pcpu_slot[nslot]);
  291. }
  292. }
  293. /**
  294. * pcpu_chunk_addr_search - determine chunk containing specified address
  295. * @addr: address for which the chunk needs to be determined.
  296. *
  297. * RETURNS:
  298. * The address of the found chunk.
  299. */
  300. static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  301. {
  302. void *first_start = pcpu_first_chunk->base_addr;
  303. /* is it in the first chunk? */
  304. if (addr >= first_start && addr < first_start + pcpu_unit_size) {
  305. /* is it in the reserved area? */
  306. if (addr < first_start + pcpu_reserved_chunk_limit)
  307. return pcpu_reserved_chunk;
  308. return pcpu_first_chunk;
  309. }
  310. /*
  311. * The address is relative to unit0 which might be unused and
  312. * thus unmapped. Offset the address to the unit space of the
  313. * current processor before looking it up in the vmalloc
  314. * space. Note that any possible cpu id can be used here, so
  315. * there's no need to worry about preemption or cpu hotplug.
  316. */
  317. addr += pcpu_unit_offsets[raw_smp_processor_id()];
  318. return pcpu_get_page_chunk(vmalloc_to_page(addr));
  319. }
  320. /**
  321. * pcpu_extend_area_map - extend area map for allocation
  322. * @chunk: target chunk
  323. *
  324. * Extend area map of @chunk so that it can accomodate an allocation.
  325. * A single allocation can split an area into three areas, so this
  326. * function makes sure that @chunk->map has at least two extra slots.
  327. *
  328. * CONTEXT:
  329. * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
  330. * if area map is extended.
  331. *
  332. * RETURNS:
  333. * 0 if noop, 1 if successfully extended, -errno on failure.
  334. */
  335. static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
  336. {
  337. int new_alloc;
  338. int *new;
  339. size_t size;
  340. /* has enough? */
  341. if (chunk->map_alloc >= chunk->map_used + 2)
  342. return 0;
  343. spin_unlock_irqrestore(&pcpu_lock, *flags);
  344. new_alloc = PCPU_DFL_MAP_ALLOC;
  345. while (new_alloc < chunk->map_used + 2)
  346. new_alloc *= 2;
  347. new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
  348. if (!new) {
  349. spin_lock_irqsave(&pcpu_lock, *flags);
  350. return -ENOMEM;
  351. }
  352. /*
  353. * Acquire pcpu_lock and switch to new area map. Only free
  354. * could have happened inbetween, so map_used couldn't have
  355. * grown.
  356. */
  357. spin_lock_irqsave(&pcpu_lock, *flags);
  358. BUG_ON(new_alloc < chunk->map_used + 2);
  359. size = chunk->map_alloc * sizeof(chunk->map[0]);
  360. memcpy(new, chunk->map, size);
  361. /*
  362. * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
  363. * one of the first chunks and still using static map.
  364. */
  365. if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
  366. pcpu_mem_free(chunk->map, size);
  367. chunk->map_alloc = new_alloc;
  368. chunk->map = new;
  369. return 0;
  370. }
  371. /**
  372. * pcpu_split_block - split a map block
  373. * @chunk: chunk of interest
  374. * @i: index of map block to split
  375. * @head: head size in bytes (can be 0)
  376. * @tail: tail size in bytes (can be 0)
  377. *
  378. * Split the @i'th map block into two or three blocks. If @head is
  379. * non-zero, @head bytes block is inserted before block @i moving it
  380. * to @i+1 and reducing its size by @head bytes.
  381. *
  382. * If @tail is non-zero, the target block, which can be @i or @i+1
  383. * depending on @head, is reduced by @tail bytes and @tail byte block
  384. * is inserted after the target block.
  385. *
  386. * @chunk->map must have enough free slots to accomodate the split.
  387. *
  388. * CONTEXT:
  389. * pcpu_lock.
  390. */
  391. static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
  392. int head, int tail)
  393. {
  394. int nr_extra = !!head + !!tail;
  395. BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
  396. /* insert new subblocks */
  397. memmove(&chunk->map[i + nr_extra], &chunk->map[i],
  398. sizeof(chunk->map[0]) * (chunk->map_used - i));
  399. chunk->map_used += nr_extra;
  400. if (head) {
  401. chunk->map[i + 1] = chunk->map[i] - head;
  402. chunk->map[i++] = head;
  403. }
  404. if (tail) {
  405. chunk->map[i++] -= tail;
  406. chunk->map[i] = tail;
  407. }
  408. }
  409. /**
  410. * pcpu_alloc_area - allocate area from a pcpu_chunk
  411. * @chunk: chunk of interest
  412. * @size: wanted size in bytes
  413. * @align: wanted align
  414. *
  415. * Try to allocate @size bytes area aligned at @align from @chunk.
  416. * Note that this function only allocates the offset. It doesn't
  417. * populate or map the area.
  418. *
  419. * @chunk->map must have at least two free slots.
  420. *
  421. * CONTEXT:
  422. * pcpu_lock.
  423. *
  424. * RETURNS:
  425. * Allocated offset in @chunk on success, -1 if no matching area is
  426. * found.
  427. */
  428. static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
  429. {
  430. int oslot = pcpu_chunk_slot(chunk);
  431. int max_contig = 0;
  432. int i, off;
  433. for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
  434. bool is_last = i + 1 == chunk->map_used;
  435. int head, tail;
  436. /* extra for alignment requirement */
  437. head = ALIGN(off, align) - off;
  438. BUG_ON(i == 0 && head != 0);
  439. if (chunk->map[i] < 0)
  440. continue;
  441. if (chunk->map[i] < head + size) {
  442. max_contig = max(chunk->map[i], max_contig);
  443. continue;
  444. }
  445. /*
  446. * If head is small or the previous block is free,
  447. * merge'em. Note that 'small' is defined as smaller
  448. * than sizeof(int), which is very small but isn't too
  449. * uncommon for percpu allocations.
  450. */
  451. if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
  452. if (chunk->map[i - 1] > 0)
  453. chunk->map[i - 1] += head;
  454. else {
  455. chunk->map[i - 1] -= head;
  456. chunk->free_size -= head;
  457. }
  458. chunk->map[i] -= head;
  459. off += head;
  460. head = 0;
  461. }
  462. /* if tail is small, just keep it around */
  463. tail = chunk->map[i] - head - size;
  464. if (tail < sizeof(int))
  465. tail = 0;
  466. /* split if warranted */
  467. if (head || tail) {
  468. pcpu_split_block(chunk, i, head, tail);
  469. if (head) {
  470. i++;
  471. off += head;
  472. max_contig = max(chunk->map[i - 1], max_contig);
  473. }
  474. if (tail)
  475. max_contig = max(chunk->map[i + 1], max_contig);
  476. }
  477. /* update hint and mark allocated */
  478. if (is_last)
  479. chunk->contig_hint = max_contig; /* fully scanned */
  480. else
  481. chunk->contig_hint = max(chunk->contig_hint,
  482. max_contig);
  483. chunk->free_size -= chunk->map[i];
  484. chunk->map[i] = -chunk->map[i];
  485. pcpu_chunk_relocate(chunk, oslot);
  486. return off;
  487. }
  488. chunk->contig_hint = max_contig; /* fully scanned */
  489. pcpu_chunk_relocate(chunk, oslot);
  490. /* tell the upper layer that this chunk has no matching area */
  491. return -1;
  492. }
  493. /**
  494. * pcpu_free_area - free area to a pcpu_chunk
  495. * @chunk: chunk of interest
  496. * @freeme: offset of area to free
  497. *
  498. * Free area starting from @freeme to @chunk. Note that this function
  499. * only modifies the allocation map. It doesn't depopulate or unmap
  500. * the area.
  501. *
  502. * CONTEXT:
  503. * pcpu_lock.
  504. */
  505. static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
  506. {
  507. int oslot = pcpu_chunk_slot(chunk);
  508. int i, off;
  509. for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
  510. if (off == freeme)
  511. break;
  512. BUG_ON(off != freeme);
  513. BUG_ON(chunk->map[i] > 0);
  514. chunk->map[i] = -chunk->map[i];
  515. chunk->free_size += chunk->map[i];
  516. /* merge with previous? */
  517. if (i > 0 && chunk->map[i - 1] >= 0) {
  518. chunk->map[i - 1] += chunk->map[i];
  519. chunk->map_used--;
  520. memmove(&chunk->map[i], &chunk->map[i + 1],
  521. (chunk->map_used - i) * sizeof(chunk->map[0]));
  522. i--;
  523. }
  524. /* merge with next? */
  525. if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
  526. chunk->map[i] += chunk->map[i + 1];
  527. chunk->map_used--;
  528. memmove(&chunk->map[i + 1], &chunk->map[i + 2],
  529. (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
  530. }
  531. chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
  532. pcpu_chunk_relocate(chunk, oslot);
  533. }
  534. /**
  535. * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
  536. * @chunk: chunk of interest
  537. * @bitmapp: output parameter for bitmap
  538. * @may_alloc: may allocate the array
  539. *
  540. * Returns pointer to array of pointers to struct page and bitmap,
  541. * both of which can be indexed with pcpu_page_idx(). The returned
  542. * array is cleared to zero and *@bitmapp is copied from
  543. * @chunk->populated. Note that there is only one array and bitmap
  544. * and access exclusion is the caller's responsibility.
  545. *
  546. * CONTEXT:
  547. * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
  548. * Otherwise, don't care.
  549. *
  550. * RETURNS:
  551. * Pointer to temp pages array on success, NULL on failure.
  552. */
  553. static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
  554. unsigned long **bitmapp,
  555. bool may_alloc)
  556. {
  557. static struct page **pages;
  558. static unsigned long *bitmap;
  559. size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
  560. size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
  561. sizeof(unsigned long);
  562. if (!pages || !bitmap) {
  563. if (may_alloc && !pages)
  564. pages = pcpu_mem_alloc(pages_size);
  565. if (may_alloc && !bitmap)
  566. bitmap = pcpu_mem_alloc(bitmap_size);
  567. if (!pages || !bitmap)
  568. return NULL;
  569. }
  570. memset(pages, 0, pages_size);
  571. bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
  572. *bitmapp = bitmap;
  573. return pages;
  574. }
  575. /**
  576. * pcpu_free_pages - free pages which were allocated for @chunk
  577. * @chunk: chunk pages were allocated for
  578. * @pages: array of pages to be freed, indexed by pcpu_page_idx()
  579. * @populated: populated bitmap
  580. * @page_start: page index of the first page to be freed
  581. * @page_end: page index of the last page to be freed + 1
  582. *
  583. * Free pages [@page_start and @page_end) in @pages for all units.
  584. * The pages were allocated for @chunk.
  585. */
  586. static void pcpu_free_pages(struct pcpu_chunk *chunk,
  587. struct page **pages, unsigned long *populated,
  588. int page_start, int page_end)
  589. {
  590. unsigned int cpu;
  591. int i;
  592. for_each_possible_cpu(cpu) {
  593. for (i = page_start; i < page_end; i++) {
  594. struct page *page = pages[pcpu_page_idx(cpu, i)];
  595. if (page)
  596. __free_page(page);
  597. }
  598. }
  599. }
  600. /**
  601. * pcpu_alloc_pages - allocates pages for @chunk
  602. * @chunk: target chunk
  603. * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
  604. * @populated: populated bitmap
  605. * @page_start: page index of the first page to be allocated
  606. * @page_end: page index of the last page to be allocated + 1
  607. *
  608. * Allocate pages [@page_start,@page_end) into @pages for all units.
  609. * The allocation is for @chunk. Percpu core doesn't care about the
  610. * content of @pages and will pass it verbatim to pcpu_map_pages().
  611. */
  612. static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
  613. struct page **pages, unsigned long *populated,
  614. int page_start, int page_end)
  615. {
  616. const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
  617. unsigned int cpu;
  618. int i;
  619. for_each_possible_cpu(cpu) {
  620. for (i = page_start; i < page_end; i++) {
  621. struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
  622. *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
  623. if (!*pagep) {
  624. pcpu_free_pages(chunk, pages, populated,
  625. page_start, page_end);
  626. return -ENOMEM;
  627. }
  628. }
  629. }
  630. return 0;
  631. }
  632. /**
  633. * pcpu_pre_unmap_flush - flush cache prior to unmapping
  634. * @chunk: chunk the regions to be flushed belongs to
  635. * @page_start: page index of the first page to be flushed
  636. * @page_end: page index of the last page to be flushed + 1
  637. *
  638. * Pages in [@page_start,@page_end) of @chunk are about to be
  639. * unmapped. Flush cache. As each flushing trial can be very
  640. * expensive, issue flush on the whole region at once rather than
  641. * doing it for each cpu. This could be an overkill but is more
  642. * scalable.
  643. */
  644. static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
  645. int page_start, int page_end)
  646. {
  647. flush_cache_vunmap(
  648. pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
  649. pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
  650. }
  651. static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
  652. {
  653. unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
  654. }
  655. /**
  656. * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
  657. * @chunk: chunk of interest
  658. * @pages: pages array which can be used to pass information to free
  659. * @populated: populated bitmap
  660. * @page_start: page index of the first page to unmap
  661. * @page_end: page index of the last page to unmap + 1
  662. *
  663. * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
  664. * Corresponding elements in @pages were cleared by the caller and can
  665. * be used to carry information to pcpu_free_pages() which will be
  666. * called after all unmaps are finished. The caller should call
  667. * proper pre/post flush functions.
  668. */
  669. static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
  670. struct page **pages, unsigned long *populated,
  671. int page_start, int page_end)
  672. {
  673. unsigned int cpu;
  674. int i;
  675. for_each_possible_cpu(cpu) {
  676. for (i = page_start; i < page_end; i++) {
  677. struct page *page;
  678. page = pcpu_chunk_page(chunk, cpu, i);
  679. WARN_ON(!page);
  680. pages[pcpu_page_idx(cpu, i)] = page;
  681. }
  682. __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
  683. page_end - page_start);
  684. }
  685. for (i = page_start; i < page_end; i++)
  686. __clear_bit(i, populated);
  687. }
  688. /**
  689. * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
  690. * @chunk: pcpu_chunk the regions to be flushed belong to
  691. * @page_start: page index of the first page to be flushed
  692. * @page_end: page index of the last page to be flushed + 1
  693. *
  694. * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
  695. * TLB for the regions. This can be skipped if the area is to be
  696. * returned to vmalloc as vmalloc will handle TLB flushing lazily.
  697. *
  698. * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
  699. * for the whole region.
  700. */
  701. static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
  702. int page_start, int page_end)
  703. {
  704. flush_tlb_kernel_range(
  705. pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
  706. pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
  707. }
  708. static int __pcpu_map_pages(unsigned long addr, struct page **pages,
  709. int nr_pages)
  710. {
  711. return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
  712. PAGE_KERNEL, pages);
  713. }
  714. /**
  715. * pcpu_map_pages - map pages into a pcpu_chunk
  716. * @chunk: chunk of interest
  717. * @pages: pages array containing pages to be mapped
  718. * @populated: populated bitmap
  719. * @page_start: page index of the first page to map
  720. * @page_end: page index of the last page to map + 1
  721. *
  722. * For each cpu, map pages [@page_start,@page_end) into @chunk. The
  723. * caller is responsible for calling pcpu_post_map_flush() after all
  724. * mappings are complete.
  725. *
  726. * This function is responsible for setting corresponding bits in
  727. * @chunk->populated bitmap and whatever is necessary for reverse
  728. * lookup (addr -> chunk).
  729. */
  730. static int pcpu_map_pages(struct pcpu_chunk *chunk,
  731. struct page **pages, unsigned long *populated,
  732. int page_start, int page_end)
  733. {
  734. unsigned int cpu, tcpu;
  735. int i, err;
  736. for_each_possible_cpu(cpu) {
  737. err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
  738. &pages[pcpu_page_idx(cpu, page_start)],
  739. page_end - page_start);
  740. if (err < 0)
  741. goto err;
  742. }
  743. /* mapping successful, link chunk and mark populated */
  744. for (i = page_start; i < page_end; i++) {
  745. for_each_possible_cpu(cpu)
  746. pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
  747. chunk);
  748. __set_bit(i, populated);
  749. }
  750. return 0;
  751. err:
  752. for_each_possible_cpu(tcpu) {
  753. if (tcpu == cpu)
  754. break;
  755. __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
  756. page_end - page_start);
  757. }
  758. return err;
  759. }
  760. /**
  761. * pcpu_post_map_flush - flush cache after mapping
  762. * @chunk: pcpu_chunk the regions to be flushed belong to
  763. * @page_start: page index of the first page to be flushed
  764. * @page_end: page index of the last page to be flushed + 1
  765. *
  766. * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
  767. * cache.
  768. *
  769. * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
  770. * for the whole region.
  771. */
  772. static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  773. int page_start, int page_end)
  774. {
  775. flush_cache_vmap(
  776. pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
  777. pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
  778. }
  779. /**
  780. * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
  781. * @chunk: chunk to depopulate
  782. * @off: offset to the area to depopulate
  783. * @size: size of the area to depopulate in bytes
  784. * @flush: whether to flush cache and tlb or not
  785. *
  786. * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  787. * from @chunk. If @flush is true, vcache is flushed before unmapping
  788. * and tlb after.
  789. *
  790. * CONTEXT:
  791. * pcpu_alloc_mutex.
  792. */
  793. static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
  794. {
  795. int page_start = PFN_DOWN(off);
  796. int page_end = PFN_UP(off + size);
  797. struct page **pages;
  798. unsigned long *populated;
  799. int rs, re;
  800. /* quick path, check whether it's empty already */
  801. pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
  802. if (rs == page_start && re == page_end)
  803. return;
  804. break;
  805. }
  806. /* immutable chunks can't be depopulated */
  807. WARN_ON(chunk->immutable);
  808. /*
  809. * If control reaches here, there must have been at least one
  810. * successful population attempt so the temp pages array must
  811. * be available now.
  812. */
  813. pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
  814. BUG_ON(!pages);
  815. /* unmap and free */
  816. pcpu_pre_unmap_flush(chunk, page_start, page_end);
  817. pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
  818. pcpu_unmap_pages(chunk, pages, populated, rs, re);
  819. /* no need to flush tlb, vmalloc will handle it lazily */
  820. pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
  821. pcpu_free_pages(chunk, pages, populated, rs, re);
  822. /* commit new bitmap */
  823. bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
  824. }
  825. /**
  826. * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
  827. * @chunk: chunk of interest
  828. * @off: offset to the area to populate
  829. * @size: size of the area to populate in bytes
  830. *
  831. * For each cpu, populate and map pages [@page_start,@page_end) into
  832. * @chunk. The area is cleared on return.
  833. *
  834. * CONTEXT:
  835. * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  836. */
  837. static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
  838. {
  839. int page_start = PFN_DOWN(off);
  840. int page_end = PFN_UP(off + size);
  841. int free_end = page_start, unmap_end = page_start;
  842. struct page **pages;
  843. unsigned long *populated;
  844. unsigned int cpu;
  845. int rs, re, rc;
  846. /* quick path, check whether all pages are already there */
  847. pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
  848. if (rs == page_start && re == page_end)
  849. goto clear;
  850. break;
  851. }
  852. /* need to allocate and map pages, this chunk can't be immutable */
  853. WARN_ON(chunk->immutable);
  854. pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
  855. if (!pages)
  856. return -ENOMEM;
  857. /* alloc and map */
  858. pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
  859. rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
  860. if (rc)
  861. goto err_free;
  862. free_end = re;
  863. }
  864. pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
  865. rc = pcpu_map_pages(chunk, pages, populated, rs, re);
  866. if (rc)
  867. goto err_unmap;
  868. unmap_end = re;
  869. }
  870. pcpu_post_map_flush(chunk, page_start, page_end);
  871. /* commit new bitmap */
  872. bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
  873. clear:
  874. for_each_possible_cpu(cpu)
  875. memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
  876. return 0;
  877. err_unmap:
  878. pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
  879. pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
  880. pcpu_unmap_pages(chunk, pages, populated, rs, re);
  881. pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
  882. err_free:
  883. pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
  884. pcpu_free_pages(chunk, pages, populated, rs, re);
  885. return rc;
  886. }
  887. static void free_pcpu_chunk(struct pcpu_chunk *chunk)
  888. {
  889. if (!chunk)
  890. return;
  891. if (chunk->vms)
  892. pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
  893. pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
  894. kfree(chunk);
  895. }
  896. static struct pcpu_chunk *alloc_pcpu_chunk(void)
  897. {
  898. struct pcpu_chunk *chunk;
  899. chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
  900. if (!chunk)
  901. return NULL;
  902. chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
  903. chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
  904. chunk->map[chunk->map_used++] = pcpu_unit_size;
  905. chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
  906. pcpu_nr_groups, pcpu_atom_size,
  907. GFP_KERNEL);
  908. if (!chunk->vms) {
  909. free_pcpu_chunk(chunk);
  910. return NULL;
  911. }
  912. INIT_LIST_HEAD(&chunk->list);
  913. chunk->free_size = pcpu_unit_size;
  914. chunk->contig_hint = pcpu_unit_size;
  915. chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
  916. return chunk;
  917. }
  918. /**
  919. * pcpu_alloc - the percpu allocator
  920. * @size: size of area to allocate in bytes
  921. * @align: alignment of area (max PAGE_SIZE)
  922. * @reserved: allocate from the reserved chunk if available
  923. *
  924. * Allocate percpu area of @size bytes aligned at @align.
  925. *
  926. * CONTEXT:
  927. * Does GFP_KERNEL allocation.
  928. *
  929. * RETURNS:
  930. * Percpu pointer to the allocated area on success, NULL on failure.
  931. */
  932. static void *pcpu_alloc(size_t size, size_t align, bool reserved)
  933. {
  934. static int warn_limit = 10;
  935. struct pcpu_chunk *chunk;
  936. const char *err;
  937. int slot, off;
  938. unsigned long flags;
  939. if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
  940. WARN(true, "illegal size (%zu) or align (%zu) for "
  941. "percpu allocation\n", size, align);
  942. return NULL;
  943. }
  944. mutex_lock(&pcpu_alloc_mutex);
  945. spin_lock_irqsave(&pcpu_lock, flags);
  946. /* serve reserved allocations from the reserved chunk if available */
  947. if (reserved && pcpu_reserved_chunk) {
  948. chunk = pcpu_reserved_chunk;
  949. if (size > chunk->contig_hint ||
  950. pcpu_extend_area_map(chunk, &flags) < 0) {
  951. err = "failed to extend area map of reserved chunk";
  952. goto fail_unlock;
  953. }
  954. off = pcpu_alloc_area(chunk, size, align);
  955. if (off >= 0)
  956. goto area_found;
  957. err = "alloc from reserved chunk failed";
  958. goto fail_unlock;
  959. }
  960. restart:
  961. /* search through normal chunks */
  962. for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
  963. list_for_each_entry(chunk, &pcpu_slot[slot], list) {
  964. if (size > chunk->contig_hint)
  965. continue;
  966. switch (pcpu_extend_area_map(chunk, &flags)) {
  967. case 0:
  968. break;
  969. case 1:
  970. goto restart; /* pcpu_lock dropped, restart */
  971. default:
  972. err = "failed to extend area map";
  973. goto fail_unlock;
  974. }
  975. off = pcpu_alloc_area(chunk, size, align);
  976. if (off >= 0)
  977. goto area_found;
  978. }
  979. }
  980. /* hmmm... no space left, create a new chunk */
  981. spin_unlock_irqrestore(&pcpu_lock, flags);
  982. chunk = alloc_pcpu_chunk();
  983. if (!chunk) {
  984. err = "failed to allocate new chunk";
  985. goto fail_unlock_mutex;
  986. }
  987. spin_lock_irqsave(&pcpu_lock, flags);
  988. pcpu_chunk_relocate(chunk, -1);
  989. goto restart;
  990. area_found:
  991. spin_unlock_irqrestore(&pcpu_lock, flags);
  992. /* populate, map and clear the area */
  993. if (pcpu_populate_chunk(chunk, off, size)) {
  994. spin_lock_irqsave(&pcpu_lock, flags);
  995. pcpu_free_area(chunk, off);
  996. err = "failed to populate";
  997. goto fail_unlock;
  998. }
  999. mutex_unlock(&pcpu_alloc_mutex);
  1000. /* return address relative to base address */
  1001. return __addr_to_pcpu_ptr(chunk->base_addr + off);
  1002. fail_unlock:
  1003. spin_unlock_irqrestore(&pcpu_lock, flags);
  1004. fail_unlock_mutex:
  1005. mutex_unlock(&pcpu_alloc_mutex);
  1006. if (warn_limit) {
  1007. pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
  1008. "%s\n", size, align, err);
  1009. dump_stack();
  1010. if (!--warn_limit)
  1011. pr_info("PERCPU: limit reached, disable warning\n");
  1012. }
  1013. return NULL;
  1014. }
  1015. /**
  1016. * __alloc_percpu - allocate dynamic percpu area
  1017. * @size: size of area to allocate in bytes
  1018. * @align: alignment of area (max PAGE_SIZE)
  1019. *
  1020. * Allocate percpu area of @size bytes aligned at @align. Might
  1021. * sleep. Might trigger writeouts.
  1022. *
  1023. * CONTEXT:
  1024. * Does GFP_KERNEL allocation.
  1025. *
  1026. * RETURNS:
  1027. * Percpu pointer to the allocated area on success, NULL on failure.
  1028. */
  1029. void *__alloc_percpu(size_t size, size_t align)
  1030. {
  1031. return pcpu_alloc(size, align, false);
  1032. }
  1033. EXPORT_SYMBOL_GPL(__alloc_percpu);
  1034. /**
  1035. * __alloc_reserved_percpu - allocate reserved percpu area
  1036. * @size: size of area to allocate in bytes
  1037. * @align: alignment of area (max PAGE_SIZE)
  1038. *
  1039. * Allocate percpu area of @size bytes aligned at @align from reserved
  1040. * percpu area if arch has set it up; otherwise, allocation is served
  1041. * from the same dynamic area. Might sleep. Might trigger writeouts.
  1042. *
  1043. * CONTEXT:
  1044. * Does GFP_KERNEL allocation.
  1045. *
  1046. * RETURNS:
  1047. * Percpu pointer to the allocated area on success, NULL on failure.
  1048. */
  1049. void *__alloc_reserved_percpu(size_t size, size_t align)
  1050. {
  1051. return pcpu_alloc(size, align, true);
  1052. }
  1053. /**
  1054. * pcpu_reclaim - reclaim fully free chunks, workqueue function
  1055. * @work: unused
  1056. *
  1057. * Reclaim all fully free chunks except for the first one.
  1058. *
  1059. * CONTEXT:
  1060. * workqueue context.
  1061. */
  1062. static void pcpu_reclaim(struct work_struct *work)
  1063. {
  1064. LIST_HEAD(todo);
  1065. struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
  1066. struct pcpu_chunk *chunk, *next;
  1067. mutex_lock(&pcpu_alloc_mutex);
  1068. spin_lock_irq(&pcpu_lock);
  1069. list_for_each_entry_safe(chunk, next, head, list) {
  1070. WARN_ON(chunk->immutable);
  1071. /* spare the first one */
  1072. if (chunk == list_first_entry(head, struct pcpu_chunk, list))
  1073. continue;
  1074. list_move(&chunk->list, &todo);
  1075. }
  1076. spin_unlock_irq(&pcpu_lock);
  1077. list_for_each_entry_safe(chunk, next, &todo, list) {
  1078. pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
  1079. free_pcpu_chunk(chunk);
  1080. }
  1081. mutex_unlock(&pcpu_alloc_mutex);
  1082. }
  1083. /**
  1084. * free_percpu - free percpu area
  1085. * @ptr: pointer to area to free
  1086. *
  1087. * Free percpu area @ptr.
  1088. *
  1089. * CONTEXT:
  1090. * Can be called from atomic context.
  1091. */
  1092. void free_percpu(void *ptr)
  1093. {
  1094. void *addr = __pcpu_ptr_to_addr(ptr);
  1095. struct pcpu_chunk *chunk;
  1096. unsigned long flags;
  1097. int off;
  1098. if (!ptr)
  1099. return;
  1100. spin_lock_irqsave(&pcpu_lock, flags);
  1101. chunk = pcpu_chunk_addr_search(addr);
  1102. off = addr - chunk->base_addr;
  1103. pcpu_free_area(chunk, off);
  1104. /* if there are more than one fully free chunks, wake up grim reaper */
  1105. if (chunk->free_size == pcpu_unit_size) {
  1106. struct pcpu_chunk *pos;
  1107. list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
  1108. if (pos != chunk) {
  1109. schedule_work(&pcpu_reclaim_work);
  1110. break;
  1111. }
  1112. }
  1113. spin_unlock_irqrestore(&pcpu_lock, flags);
  1114. }
  1115. EXPORT_SYMBOL_GPL(free_percpu);
  1116. static inline size_t pcpu_calc_fc_sizes(size_t static_size,
  1117. size_t reserved_size,
  1118. ssize_t *dyn_sizep)
  1119. {
  1120. size_t size_sum;
  1121. size_sum = PFN_ALIGN(static_size + reserved_size +
  1122. (*dyn_sizep >= 0 ? *dyn_sizep : 0));
  1123. if (*dyn_sizep != 0)
  1124. *dyn_sizep = size_sum - static_size - reserved_size;
  1125. return size_sum;
  1126. }
  1127. /**
  1128. * pcpu_alloc_alloc_info - allocate percpu allocation info
  1129. * @nr_groups: the number of groups
  1130. * @nr_units: the number of units
  1131. *
  1132. * Allocate ai which is large enough for @nr_groups groups containing
  1133. * @nr_units units. The returned ai's groups[0].cpu_map points to the
  1134. * cpu_map array which is long enough for @nr_units and filled with
  1135. * NR_CPUS. It's the caller's responsibility to initialize cpu_map
  1136. * pointer of other groups.
  1137. *
  1138. * RETURNS:
  1139. * Pointer to the allocated pcpu_alloc_info on success, NULL on
  1140. * failure.
  1141. */
  1142. struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  1143. int nr_units)
  1144. {
  1145. struct pcpu_alloc_info *ai;
  1146. size_t base_size, ai_size;
  1147. void *ptr;
  1148. int unit;
  1149. base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
  1150. __alignof__(ai->groups[0].cpu_map[0]));
  1151. ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
  1152. ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
  1153. if (!ptr)
  1154. return NULL;
  1155. ai = ptr;
  1156. ptr += base_size;
  1157. ai->groups[0].cpu_map = ptr;
  1158. for (unit = 0; unit < nr_units; unit++)
  1159. ai->groups[0].cpu_map[unit] = NR_CPUS;
  1160. ai->nr_groups = nr_groups;
  1161. ai->__ai_size = PFN_ALIGN(ai_size);
  1162. return ai;
  1163. }
  1164. /**
  1165. * pcpu_free_alloc_info - free percpu allocation info
  1166. * @ai: pcpu_alloc_info to free
  1167. *
  1168. * Free @ai which was allocated by pcpu_alloc_alloc_info().
  1169. */
  1170. void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
  1171. {
  1172. free_bootmem(__pa(ai), ai->__ai_size);
  1173. }
  1174. /**
  1175. * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
  1176. * @reserved_size: the size of reserved percpu area in bytes
  1177. * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
  1178. * @atom_size: allocation atom size
  1179. * @cpu_distance_fn: callback to determine distance between cpus, optional
  1180. *
  1181. * This function determines grouping of units, their mappings to cpus
  1182. * and other parameters considering needed percpu size, allocation
  1183. * atom size and distances between CPUs.
  1184. *
  1185. * Groups are always mutliples of atom size and CPUs which are of
  1186. * LOCAL_DISTANCE both ways are grouped together and share space for
  1187. * units in the same group. The returned configuration is guaranteed
  1188. * to have CPUs on different nodes on different groups and >=75% usage
  1189. * of allocated virtual address space.
  1190. *
  1191. * RETURNS:
  1192. * On success, pointer to the new allocation_info is returned. On
  1193. * failure, ERR_PTR value is returned.
  1194. */
  1195. struct pcpu_alloc_info * __init pcpu_build_alloc_info(
  1196. size_t reserved_size, ssize_t dyn_size,
  1197. size_t atom_size,
  1198. pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
  1199. {
  1200. static int group_map[NR_CPUS] __initdata;
  1201. static int group_cnt[NR_CPUS] __initdata;
  1202. const size_t static_size = __per_cpu_end - __per_cpu_start;
  1203. int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
  1204. size_t size_sum, min_unit_size, alloc_size;
  1205. int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
  1206. int last_allocs, group, unit;
  1207. unsigned int cpu, tcpu;
  1208. struct pcpu_alloc_info *ai;
  1209. unsigned int *cpu_map;
  1210. /* this function may be called multiple times */
  1211. memset(group_map, 0, sizeof(group_map));
  1212. memset(group_cnt, 0, sizeof(group_map));
  1213. /*
  1214. * Determine min_unit_size, alloc_size and max_upa such that
  1215. * alloc_size is multiple of atom_size and is the smallest
  1216. * which can accomodate 4k aligned segments which are equal to
  1217. * or larger than min_unit_size.
  1218. */
  1219. size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
  1220. min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
  1221. alloc_size = roundup(min_unit_size, atom_size);
  1222. upa = alloc_size / min_unit_size;
  1223. while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
  1224. upa--;
  1225. max_upa = upa;
  1226. /* group cpus according to their proximity */
  1227. for_each_possible_cpu(cpu) {
  1228. group = 0;
  1229. next_group:
  1230. for_each_possible_cpu(tcpu) {
  1231. if (cpu == tcpu)
  1232. break;
  1233. if (group_map[tcpu] == group && cpu_distance_fn &&
  1234. (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
  1235. cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
  1236. group++;
  1237. nr_groups = max(nr_groups, group + 1);
  1238. goto next_group;
  1239. }
  1240. }
  1241. group_map[cpu] = group;
  1242. group_cnt[group]++;
  1243. group_cnt_max = max(group_cnt_max, group_cnt[group]);
  1244. }
  1245. /*
  1246. * Expand unit size until address space usage goes over 75%
  1247. * and then as much as possible without using more address
  1248. * space.
  1249. */
  1250. last_allocs = INT_MAX;
  1251. for (upa = max_upa; upa; upa--) {
  1252. int allocs = 0, wasted = 0;
  1253. if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
  1254. continue;
  1255. for (group = 0; group < nr_groups; group++) {
  1256. int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
  1257. allocs += this_allocs;
  1258. wasted += this_allocs * upa - group_cnt[group];
  1259. }
  1260. /*
  1261. * Don't accept if wastage is over 25%. The
  1262. * greater-than comparison ensures upa==1 always
  1263. * passes the following check.
  1264. */
  1265. if (wasted > num_possible_cpus() / 3)
  1266. continue;
  1267. /* and then don't consume more memory */
  1268. if (allocs > last_allocs)
  1269. break;
  1270. last_allocs = allocs;
  1271. best_upa = upa;
  1272. }
  1273. upa = best_upa;
  1274. /* allocate and fill alloc_info */
  1275. for (group = 0; group < nr_groups; group++)
  1276. nr_units += roundup(group_cnt[group], upa);
  1277. ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
  1278. if (!ai)
  1279. return ERR_PTR(-ENOMEM);
  1280. cpu_map = ai->groups[0].cpu_map;
  1281. for (group = 0; group < nr_groups; group++) {
  1282. ai->groups[group].cpu_map = cpu_map;
  1283. cpu_map += roundup(group_cnt[group], upa);
  1284. }
  1285. ai->static_size = static_size;
  1286. ai->reserved_size = reserved_size;
  1287. ai->dyn_size = dyn_size;
  1288. ai->unit_size = alloc_size / upa;
  1289. ai->atom_size = atom_size;
  1290. ai->alloc_size = alloc_size;
  1291. for (group = 0, unit = 0; group_cnt[group]; group++) {
  1292. struct pcpu_group_info *gi = &ai->groups[group];
  1293. /*
  1294. * Initialize base_offset as if all groups are located
  1295. * back-to-back. The caller should update this to
  1296. * reflect actual allocation.
  1297. */
  1298. gi->base_offset = unit * ai->unit_size;
  1299. for_each_possible_cpu(cpu)
  1300. if (group_map[cpu] == group)
  1301. gi->cpu_map[gi->nr_units++] = cpu;
  1302. gi->nr_units = roundup(gi->nr_units, upa);
  1303. unit += gi->nr_units;
  1304. }
  1305. BUG_ON(unit != nr_units);
  1306. return ai;
  1307. }
  1308. /**
  1309. * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
  1310. * @lvl: loglevel
  1311. * @ai: allocation info to dump
  1312. *
  1313. * Print out information about @ai using loglevel @lvl.
  1314. */
  1315. static void pcpu_dump_alloc_info(const char *lvl,
  1316. const struct pcpu_alloc_info *ai)
  1317. {
  1318. int group_width = 1, cpu_width = 1, width;
  1319. char empty_str[] = "--------";
  1320. int alloc = 0, alloc_end = 0;
  1321. int group, v;
  1322. int upa, apl; /* units per alloc, allocs per line */
  1323. v = ai->nr_groups;
  1324. while (v /= 10)
  1325. group_width++;
  1326. v = num_possible_cpus();
  1327. while (v /= 10)
  1328. cpu_width++;
  1329. empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
  1330. upa = ai->alloc_size / ai->unit_size;
  1331. width = upa * (cpu_width + 1) + group_width + 3;
  1332. apl = rounddown_pow_of_two(max(60 / width, 1));
  1333. printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
  1334. lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
  1335. ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
  1336. for (group = 0; group < ai->nr_groups; group++) {
  1337. const struct pcpu_group_info *gi = &ai->groups[group];
  1338. int unit = 0, unit_end = 0;
  1339. BUG_ON(gi->nr_units % upa);
  1340. for (alloc_end += gi->nr_units / upa;
  1341. alloc < alloc_end; alloc++) {
  1342. if (!(alloc % apl)) {
  1343. printk("\n");
  1344. printk("%spcpu-alloc: ", lvl);
  1345. }
  1346. printk("[%0*d] ", group_width, group);
  1347. for (unit_end += upa; unit < unit_end; unit++)
  1348. if (gi->cpu_map[unit] != NR_CPUS)
  1349. printk("%0*d ", cpu_width,
  1350. gi->cpu_map[unit]);
  1351. else
  1352. printk("%s ", empty_str);
  1353. }
  1354. }
  1355. printk("\n");
  1356. }
  1357. /**
  1358. * pcpu_setup_first_chunk - initialize the first percpu chunk
  1359. * @ai: pcpu_alloc_info describing how to percpu area is shaped
  1360. * @base_addr: mapped address
  1361. *
  1362. * Initialize the first percpu chunk which contains the kernel static
  1363. * perpcu area. This function is to be called from arch percpu area
  1364. * setup path.
  1365. *
  1366. * @ai contains all information necessary to initialize the first
  1367. * chunk and prime the dynamic percpu allocator.
  1368. *
  1369. * @ai->static_size is the size of static percpu area.
  1370. *
  1371. * @ai->reserved_size, if non-zero, specifies the amount of bytes to
  1372. * reserve after the static area in the first chunk. This reserves
  1373. * the first chunk such that it's available only through reserved
  1374. * percpu allocation. This is primarily used to serve module percpu
  1375. * static areas on architectures where the addressing model has
  1376. * limited offset range for symbol relocations to guarantee module
  1377. * percpu symbols fall inside the relocatable range.
  1378. *
  1379. * @ai->dyn_size determines the number of bytes available for dynamic
  1380. * allocation in the first chunk. The area between @ai->static_size +
  1381. * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
  1382. *
  1383. * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
  1384. * and equal to or larger than @ai->static_size + @ai->reserved_size +
  1385. * @ai->dyn_size.
  1386. *
  1387. * @ai->atom_size is the allocation atom size and used as alignment
  1388. * for vm areas.
  1389. *
  1390. * @ai->alloc_size is the allocation size and always multiple of
  1391. * @ai->atom_size. This is larger than @ai->atom_size if
  1392. * @ai->unit_size is larger than @ai->atom_size.
  1393. *
  1394. * @ai->nr_groups and @ai->groups describe virtual memory layout of
  1395. * percpu areas. Units which should be colocated are put into the
  1396. * same group. Dynamic VM areas will be allocated according to these
  1397. * groupings. If @ai->nr_groups is zero, a single group containing
  1398. * all units is assumed.
  1399. *
  1400. * The caller should have mapped the first chunk at @base_addr and
  1401. * copied static data to each unit.
  1402. *
  1403. * If the first chunk ends up with both reserved and dynamic areas, it
  1404. * is served by two chunks - one to serve the core static and reserved
  1405. * areas and the other for the dynamic area. They share the same vm
  1406. * and page map but uses different area allocation map to stay away
  1407. * from each other. The latter chunk is circulated in the chunk slots
  1408. * and available for dynamic allocation like any other chunks.
  1409. *
  1410. * RETURNS:
  1411. * 0 on success, -errno on failure.
  1412. */
  1413. int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
  1414. void *base_addr)
  1415. {
  1416. static char cpus_buf[4096] __initdata;
  1417. static int smap[2], dmap[2];
  1418. size_t dyn_size = ai->dyn_size;
  1419. size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
  1420. struct pcpu_chunk *schunk, *dchunk = NULL;
  1421. unsigned long *group_offsets;
  1422. size_t *group_sizes;
  1423. unsigned long *unit_off;
  1424. unsigned int cpu;
  1425. int *unit_map;
  1426. int group, unit, i;
  1427. cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
  1428. #define PCPU_SETUP_BUG_ON(cond) do { \
  1429. if (unlikely(cond)) { \
  1430. pr_emerg("PERCPU: failed to initialize, %s", #cond); \
  1431. pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
  1432. pcpu_dump_alloc_info(KERN_EMERG, ai); \
  1433. BUG(); \
  1434. } \
  1435. } while (0)
  1436. /* sanity checks */
  1437. BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
  1438. ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
  1439. PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
  1440. PCPU_SETUP_BUG_ON(!ai->static_size);
  1441. PCPU_SETUP_BUG_ON(!base_addr);
  1442. PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
  1443. PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
  1444. PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
  1445. /* process group information and build config tables accordingly */
  1446. group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
  1447. group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
  1448. unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
  1449. unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
  1450. for (cpu = 0; cpu < nr_cpu_ids; cpu++)
  1451. unit_map[cpu] = UINT_MAX;
  1452. pcpu_first_unit_cpu = NR_CPUS;
  1453. for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
  1454. const struct pcpu_group_info *gi = &ai->groups[group];
  1455. group_offsets[group] = gi->base_offset;
  1456. group_sizes[group] = gi->nr_units * ai->unit_size;
  1457. for (i = 0; i < gi->nr_units; i++) {
  1458. cpu = gi->cpu_map[i];
  1459. if (cpu == NR_CPUS)
  1460. continue;
  1461. PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
  1462. PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
  1463. PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
  1464. unit_map[cpu] = unit + i;
  1465. unit_off[cpu] = gi->base_offset + i * ai->unit_size;
  1466. if (pcpu_first_unit_cpu == NR_CPUS)
  1467. pcpu_first_unit_cpu = cpu;
  1468. }
  1469. }
  1470. pcpu_last_unit_cpu = cpu;
  1471. pcpu_nr_units = unit;
  1472. for_each_possible_cpu(cpu)
  1473. PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
  1474. /* we're done parsing the input, undefine BUG macro and dump config */
  1475. #undef PCPU_SETUP_BUG_ON
  1476. pcpu_dump_alloc_info(KERN_INFO, ai);
  1477. pcpu_nr_groups = ai->nr_groups;
  1478. pcpu_group_offsets = group_offsets;
  1479. pcpu_group_sizes = group_sizes;
  1480. pcpu_unit_map = unit_map;
  1481. pcpu_unit_offsets = unit_off;
  1482. /* determine basic parameters */
  1483. pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
  1484. pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
  1485. pcpu_atom_size = ai->atom_size;
  1486. pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
  1487. BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
  1488. /*
  1489. * Allocate chunk slots. The additional last slot is for
  1490. * empty chunks.
  1491. */
  1492. pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
  1493. pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
  1494. for (i = 0; i < pcpu_nr_slots; i++)
  1495. INIT_LIST_HEAD(&pcpu_slot[i]);
  1496. /*
  1497. * Initialize static chunk. If reserved_size is zero, the
  1498. * static chunk covers static area + dynamic allocation area
  1499. * in the first chunk. If reserved_size is not zero, it
  1500. * covers static area + reserved area (mostly used for module
  1501. * static percpu allocation).
  1502. */
  1503. schunk = alloc_bootmem(pcpu_chunk_struct_size);
  1504. INIT_LIST_HEAD(&schunk->list);
  1505. schunk->base_addr = base_addr;
  1506. schunk->map = smap;
  1507. schunk->map_alloc = ARRAY_SIZE(smap);
  1508. schunk->immutable = true;
  1509. bitmap_fill(schunk->populated, pcpu_unit_pages);
  1510. if (ai->reserved_size) {
  1511. schunk->free_size = ai->reserved_size;
  1512. pcpu_reserved_chunk = schunk;
  1513. pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
  1514. } else {
  1515. schunk->free_size = dyn_size;
  1516. dyn_size = 0; /* dynamic area covered */
  1517. }
  1518. schunk->contig_hint = schunk->free_size;
  1519. schunk->map[schunk->map_used++] = -ai->static_size;
  1520. if (schunk->free_size)
  1521. schunk->map[schunk->map_used++] = schunk->free_size;
  1522. /* init dynamic chunk if necessary */
  1523. if (dyn_size) {
  1524. dchunk = alloc_bootmem(pcpu_chunk_struct_size);
  1525. INIT_LIST_HEAD(&dchunk->list);
  1526. dchunk->base_addr = base_addr;
  1527. dchunk->map = dmap;
  1528. dchunk->map_alloc = ARRAY_SIZE(dmap);
  1529. dchunk->immutable = true;
  1530. bitmap_fill(dchunk->populated, pcpu_unit_pages);
  1531. dchunk->contig_hint = dchunk->free_size = dyn_size;
  1532. dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
  1533. dchunk->map[dchunk->map_used++] = dchunk->free_size;
  1534. }
  1535. /* link the first chunk in */
  1536. pcpu_first_chunk = dchunk ?: schunk;
  1537. pcpu_chunk_relocate(pcpu_first_chunk, -1);
  1538. /* we're done */
  1539. pcpu_base_addr = base_addr;
  1540. return 0;
  1541. }
  1542. const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
  1543. [PCPU_FC_AUTO] = "auto",
  1544. [PCPU_FC_EMBED] = "embed",
  1545. [PCPU_FC_PAGE] = "page",
  1546. };
  1547. enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
  1548. static int __init percpu_alloc_setup(char *str)
  1549. {
  1550. if (0)
  1551. /* nada */;
  1552. #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
  1553. else if (!strcmp(str, "embed"))
  1554. pcpu_chosen_fc = PCPU_FC_EMBED;
  1555. #endif
  1556. #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  1557. else if (!strcmp(str, "page"))
  1558. pcpu_chosen_fc = PCPU_FC_PAGE;
  1559. #endif
  1560. else
  1561. pr_warning("PERCPU: unknown allocator %s specified\n", str);
  1562. return 0;
  1563. }
  1564. early_param("percpu_alloc", percpu_alloc_setup);
  1565. #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
  1566. !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
  1567. /**
  1568. * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
  1569. * @reserved_size: the size of reserved percpu area in bytes
  1570. * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
  1571. * @atom_size: allocation atom size
  1572. * @cpu_distance_fn: callback to determine distance between cpus, optional
  1573. * @alloc_fn: function to allocate percpu page
  1574. * @free_fn: funtion to free percpu page
  1575. *
  1576. * This is a helper to ease setting up embedded first percpu chunk and
  1577. * can be called where pcpu_setup_first_chunk() is expected.
  1578. *
  1579. * If this function is used to setup the first chunk, it is allocated
  1580. * by calling @alloc_fn and used as-is without being mapped into
  1581. * vmalloc area. Allocations are always whole multiples of @atom_size
  1582. * aligned to @atom_size.
  1583. *
  1584. * This enables the first chunk to piggy back on the linear physical
  1585. * mapping which often uses larger page size. Please note that this
  1586. * can result in very sparse cpu->unit mapping on NUMA machines thus
  1587. * requiring large vmalloc address space. Don't use this allocator if
  1588. * vmalloc space is not orders of magnitude larger than distances
  1589. * between node memory addresses (ie. 32bit NUMA machines).
  1590. *
  1591. * When @dyn_size is positive, dynamic area might be larger than
  1592. * specified to fill page alignment. When @dyn_size is auto,
  1593. * @dyn_size is just big enough to fill page alignment after static
  1594. * and reserved areas.
  1595. *
  1596. * If the needed size is smaller than the minimum or specified unit
  1597. * size, the leftover is returned using @free_fn.
  1598. *
  1599. * RETURNS:
  1600. * 0 on success, -errno on failure.
  1601. */
  1602. int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
  1603. size_t atom_size,
  1604. pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
  1605. pcpu_fc_alloc_fn_t alloc_fn,
  1606. pcpu_fc_free_fn_t free_fn)
  1607. {
  1608. void *base = (void *)ULONG_MAX;
  1609. void **areas = NULL;
  1610. struct pcpu_alloc_info *ai;
  1611. size_t size_sum, areas_size, max_distance;
  1612. int group, i, rc;
  1613. ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
  1614. cpu_distance_fn);
  1615. if (IS_ERR(ai))
  1616. return PTR_ERR(ai);
  1617. size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
  1618. areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
  1619. areas = alloc_bootmem_nopanic(areas_size);
  1620. if (!areas) {
  1621. rc = -ENOMEM;
  1622. goto out_free;
  1623. }
  1624. /* allocate, copy and determine base address */
  1625. for (group = 0; group < ai->nr_groups; group++) {
  1626. struct pcpu_group_info *gi = &ai->groups[group];
  1627. unsigned int cpu = NR_CPUS;
  1628. void *ptr;
  1629. for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
  1630. cpu = gi->cpu_map[i];
  1631. BUG_ON(cpu == NR_CPUS);
  1632. /* allocate space for the whole group */
  1633. ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
  1634. if (!ptr) {
  1635. rc = -ENOMEM;
  1636. goto out_free_areas;
  1637. }
  1638. areas[group] = ptr;
  1639. base = min(ptr, base);
  1640. for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
  1641. if (gi->cpu_map[i] == NR_CPUS) {
  1642. /* unused unit, free whole */
  1643. free_fn(ptr, ai->unit_size);
  1644. continue;
  1645. }
  1646. /* copy and return the unused part */
  1647. memcpy(ptr, __per_cpu_load, ai->static_size);
  1648. free_fn(ptr + size_sum, ai->unit_size - size_sum);
  1649. }
  1650. }
  1651. /* base address is now known, determine group base offsets */
  1652. max_distance = 0;
  1653. for (group = 0; group < ai->nr_groups; group++) {
  1654. ai->groups[group].base_offset = areas[group] - base;
  1655. max_distance = max_t(size_t, max_distance,
  1656. ai->groups[group].base_offset);
  1657. }
  1658. max_distance += ai->unit_size;
  1659. /* warn if maximum distance is further than 75% of vmalloc space */
  1660. if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
  1661. pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
  1662. "space 0x%lx\n",
  1663. max_distance, VMALLOC_END - VMALLOC_START);
  1664. #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  1665. /* and fail if we have fallback */
  1666. rc = -EINVAL;
  1667. goto out_free;
  1668. #endif
  1669. }
  1670. pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
  1671. PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
  1672. ai->dyn_size, ai->unit_size);
  1673. rc = pcpu_setup_first_chunk(ai, base);
  1674. goto out_free;
  1675. out_free_areas:
  1676. for (group = 0; group < ai->nr_groups; group++)
  1677. free_fn(areas[group],
  1678. ai->groups[group].nr_units * ai->unit_size);
  1679. out_free:
  1680. pcpu_free_alloc_info(ai);
  1681. if (areas)
  1682. free_bootmem(__pa(areas), areas_size);
  1683. return rc;
  1684. }
  1685. #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
  1686. !CONFIG_HAVE_SETUP_PER_CPU_AREA */
  1687. #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
  1688. /**
  1689. * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
  1690. * @reserved_size: the size of reserved percpu area in bytes
  1691. * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
  1692. * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
  1693. * @populate_pte_fn: function to populate pte
  1694. *
  1695. * This is a helper to ease setting up page-remapped first percpu
  1696. * chunk and can be called where pcpu_setup_first_chunk() is expected.
  1697. *
  1698. * This is the basic allocator. Static percpu area is allocated
  1699. * page-by-page into vmalloc area.
  1700. *
  1701. * RETURNS:
  1702. * 0 on success, -errno on failure.
  1703. */
  1704. int __init pcpu_page_first_chunk(size_t reserved_size,
  1705. pcpu_fc_alloc_fn_t alloc_fn,
  1706. pcpu_fc_free_fn_t free_fn,
  1707. pcpu_fc_populate_pte_fn_t populate_pte_fn)
  1708. {
  1709. static struct vm_struct vm;
  1710. struct pcpu_alloc_info *ai;
  1711. char psize_str[16];
  1712. int unit_pages;
  1713. size_t pages_size;
  1714. struct page **pages;
  1715. int unit, i, j, rc;
  1716. snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
  1717. ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
  1718. if (IS_ERR(ai))
  1719. return PTR_ERR(ai);
  1720. BUG_ON(ai->nr_groups != 1);
  1721. BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
  1722. unit_pages = ai->unit_size >> PAGE_SHIFT;
  1723. /* unaligned allocations can't be freed, round up to page size */
  1724. pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
  1725. sizeof(pages[0]));
  1726. pages = alloc_bootmem(pages_size);
  1727. /* allocate pages */
  1728. j = 0;
  1729. for (unit = 0; unit < num_possible_cpus(); unit++)
  1730. for (i = 0; i < unit_pages; i++) {
  1731. unsigned int cpu = ai->groups[0].cpu_map[unit];
  1732. void *ptr;
  1733. ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
  1734. if (!ptr) {
  1735. pr_warning("PERCPU: failed to allocate %s page "
  1736. "for cpu%u\n", psize_str, cpu);
  1737. goto enomem;
  1738. }
  1739. pages[j++] = virt_to_page(ptr);
  1740. }
  1741. /* allocate vm area, map the pages and copy static data */
  1742. vm.flags = VM_ALLOC;
  1743. vm.size = num_possible_cpus() * ai->unit_size;
  1744. vm_area_register_early(&vm, PAGE_SIZE);
  1745. for (unit = 0; unit < num_possible_cpus(); unit++) {
  1746. unsigned long unit_addr =
  1747. (unsigned long)vm.addr + unit * ai->unit_size;
  1748. for (i = 0; i < unit_pages; i++)
  1749. populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
  1750. /* pte already populated, the following shouldn't fail */
  1751. rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
  1752. unit_pages);
  1753. if (rc < 0)
  1754. panic("failed to map percpu area, err=%d\n", rc);
  1755. /*
  1756. * FIXME: Archs with virtual cache should flush local
  1757. * cache for the linear mapping here - something
  1758. * equivalent to flush_cache_vmap() on the local cpu.
  1759. * flush_cache_vmap() can't be used as most supporting
  1760. * data structures are not set up yet.
  1761. */
  1762. /* copy static data */
  1763. memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
  1764. }
  1765. /* we're ready, commit */
  1766. pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
  1767. unit_pages, psize_str, vm.addr, ai->static_size,
  1768. ai->reserved_size, ai->dyn_size);
  1769. rc = pcpu_setup_first_chunk(ai, vm.addr);
  1770. goto out_free_ar;
  1771. enomem:
  1772. while (--j >= 0)
  1773. free_fn(page_address(pages[j]), PAGE_SIZE);
  1774. rc = -ENOMEM;
  1775. out_free_ar:
  1776. free_bootmem(__pa(pages), pages_size);
  1777. pcpu_free_alloc_info(ai);
  1778. return rc;
  1779. }
  1780. #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
  1781. /*
  1782. * Generic percpu area setup.
  1783. *
  1784. * The embedding helper is used because its behavior closely resembles
  1785. * the original non-dynamic generic percpu area setup. This is
  1786. * important because many archs have addressing restrictions and might
  1787. * fail if the percpu area is located far away from the previous
  1788. * location. As an added bonus, in non-NUMA cases, embedding is
  1789. * generally a good idea TLB-wise because percpu area can piggy back
  1790. * on the physical linear memory mapping which uses large page
  1791. * mappings on applicable archs.
  1792. */
  1793. #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
  1794. unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
  1795. EXPORT_SYMBOL(__per_cpu_offset);
  1796. static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
  1797. size_t align)
  1798. {
  1799. return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
  1800. }
  1801. static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
  1802. {
  1803. free_bootmem(__pa(ptr), size);
  1804. }
  1805. void __init setup_per_cpu_areas(void)
  1806. {
  1807. unsigned long delta;
  1808. unsigned int cpu;
  1809. int rc;
  1810. /*
  1811. * Always reserve area for module percpu variables. That's
  1812. * what the legacy allocator did.
  1813. */
  1814. rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
  1815. PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
  1816. pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
  1817. if (rc < 0)
  1818. panic("Failed to initialized percpu areas.");
  1819. delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
  1820. for_each_possible_cpu(cpu)
  1821. __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
  1822. }
  1823. #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */