genalloc.c

/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
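
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original file; uncached_virt, uncached_phys and POOL_SIZE are
 * hypothetical):
 *
 *      pool = gen_pool_create(5, -1);          (32-byte granularity)
 *      if (!pool)
 *              return -ENOMEM;
 *      if (gen_pool_add_virt(pool, uncached_virt, uncached_phys,
 *                            POOL_SIZE, -1))
 *              return -ENOMEM;
 *      addr = gen_pool_alloc(pool, 256);       (lockless fast path)
 *      gen_pool_free(pool, addr, 256);
 *      gen_pool_destroy(pool);
 */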

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them will get back the number of bits
 * that remained unset; on full success the return value is 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}
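
/*
 * Worked example (editorial addition): with BITS_PER_LONG == 64,
 * bitmap_set_ll(map, 60, 8) first sets the four bits 60..63 via
 * BITMAP_FIRST_WORD_MASK(60) in word 0, then the remaining four bits
 * 0..3 of word 1 via BITMAP_LAST_WORD_MASK(68). If either cmpxchg
 * loses a race to a concurrent setter, the count of bits not yet set
 * is returned so the caller can roll back and retry.
 */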

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them will get back the number of
 * bits that remained set; on full success the return value is 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
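
/*
 * Example (editorial addition): gen_pool_create(12, -1) creates a pool
 * whose bitmap tracks 4 KiB (1 << 12) per bit, so every allocation is
 * rounded up to a multiple of 4 KiB; gen_pool_create(5, -1) would give
 * 32-byte granularity at the cost of a bitmap 128 times as large for
 * the same chunk size.
 */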

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
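
/*
 * Note (editorial addition): callers that have no meaningful physical
 * address can use the gen_pool_add() wrapper from <linux/genalloc.h>,
 * which simply calls gen_pool_add_virt() with phys == -1, e.g.:
 *
 *      rv = gen_pool_add(pool, (unsigned long)vaddr, size, -1);
 */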

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
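
/*
 * Illustrative use (editorial addition): a driver that hands pool
 * memory to a device typically allocates by virtual address and then
 * resolves the physical address for the hardware:
 *
 *      vaddr = gen_pool_alloc(pool, len);
 *      paddr = gen_pool_virt_to_phys(pool, vaddr);
 *      (program paddr into a device descriptor; -1 means lookup failed)
 */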

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                                pool->data);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
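
/*
 * Example (editorial addition): on a pool created with
 * min_alloc_order = 5, gen_pool_alloc(pool, 100) rounds the request up
 * to nbits = 4 bitmap bits, i.e. 128 bytes. A return value of 0 means
 * the allocation failed, so 0 must never be a valid pool address;
 * callers conventionally avoid adding a chunk that starts at address 0.
 */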

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
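
/*
 * Sketch of a chunk-walker callback (editorial addition; the callback
 * name and use are hypothetical). Because @func runs under
 * rcu_read_lock, it must not sleep:
 *
 *      static void count_chunks(struct gen_pool *pool,
 *                               struct gen_pool_chunk *chunk, void *data)
 *      {
 *              (*(unsigned int *)data)++;
 *      }
 *
 *      unsigned int n = 0;
 *      gen_pool_for_each_chunk(pool, count_chunks, &n);
 */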

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
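
/*
 * Example (editorial addition): switching a pool to the best-fit
 * strategy exported below; the data argument is NULL because neither
 * built-in algorithm uses it:
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */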

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * in which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
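
/*
 * Worked example (editorial addition): for a bitmap with free runs of
 * 8, 3 and 4 zero bits and nr == 3, gen_pool_first_fit would return
 * the start of the 8-bit run, while gen_pool_best_fit keeps scanning,
 * finds the 3-bit run whose length equals nr exactly, and returns it
 * immediately, leaving the larger runs intact for bigger requests.
 */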

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                int nid)
{
        struct gen_pool **ptr, *pool;

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = gen_pool_create(min_alloc_order, nid);
        if (pool) {
                *ptr = pool;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
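
/*
 * Illustrative probe-time use (editorial addition; my_probe is
 * hypothetical):
 *
 *      static int my_probe(struct platform_device *pdev)
 *      {
 *              struct gen_pool *pool;
 *
 *              pool = devm_gen_pool_create(&pdev->dev, 5, -1);
 *              if (!pool)
 *                      return -ENOMEM;
 *              (no remove-time gen_pool_destroy needed; devres does it)
 *      }
 */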

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
        struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
                                        NULL);

        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;
        pdev = of_find_device_by_node(np_pool);
        of_node_put(np_pool);   /* drop the reference taken by of_parse_phandle() */
        if (!pdev)
                return NULL;
        return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
#endif /* CONFIG_OF */
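
/*
 * Device tree sketch (editorial addition; node and property names are
 * hypothetical):
 *
 *      sram_pool: sram@40000000 { ... };
 *      mydev@50000000 {
 *              sram = <&sram_pool>;
 *      };
 *
 * A driver for mydev would then look the pool up with:
 *
 *      pool = of_get_named_gen_pool(dev->of_node, "sram", 0);
 */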