book3s_hv_cma.c

/*
 * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
 * for DMA mapping framework
 *
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 */
#define pr_fmt(fmt) "kvm_cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "book3s_hv_cma.h"

struct kvm_cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

static DEFINE_MUTEX(kvm_cma_mutex);
static struct kvm_cma kvm_cma_area;

/**
 * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
 *				  for kvm hash pagetable
 * @size: Size of the reserved memory.
 * @alignment: Alignment for the contiguous memory area
 *
 * This function reserves memory for kvm cma area. It should be
 * called by arch code when early allocator (memblock or bootmem)
 * is still active.
 */
long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
{
	long base_pfn;
	phys_addr_t addr;
	struct kvm_cma *cma = &kvm_cma_area;

	pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);

	if (!size)
		return -EINVAL;
	/*
	 * Sanitise input arguments.
	 * We should be pageblock aligned for CMA.
	 */
	alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
	size = ALIGN(size, alignment);
	/*
	 * Reserve memory
	 * Use __memblock_alloc_base() since
	 * memblock_alloc_base() panic()s.
	 */
	addr = __memblock_alloc_base(size, alignment, 0);
	if (!addr) {
		base_pfn = -ENOMEM;
		goto err;
	} else
		base_pfn = PFN_DOWN(addr);

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = base_pfn;
	cma->count = size >> PAGE_SHIFT;
	pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return base_pfn;
}

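/*
 * Illustrative caller sketch (not part of the original file): arch setup
 * code is expected to call kvm_cma_declare_contiguous() early in boot,
 * while memblock is still active.  The function name below, the
 * 5%-of-RAM sizing and the HPT_ALIGN_PAGES based alignment are
 * assumptions for illustration only.
 */
#if 0	/* usage sketch, not compiled */
void __init example_kvm_cma_reserve(void)
{
	/* Reserve roughly 5% of system memory for guest hash pagetables. */
	phys_addr_t selected_size = (memblock_phys_mem_size() * 5) / 100;
	phys_addr_t align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

	if (selected_size &&
	    kvm_cma_declare_contiguous(selected_size, align_size) < 0)
		pr_err("example: failed to reserve KVM CMA area\n");
}
#endif
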
/**
 * kvm_alloc_cma() - allocate pages from contiguous area
 * @nr_pages: Requested number of pages.
 * @align_pages: Requested alignment in number of pages
 *
 * This function allocates memory buffer for hash pagetable.
 */
struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
{
	int ret;
	struct page *page = NULL;
	struct kvm_cma *cma = &kvm_cma_area;
	unsigned long chunk_count, nr_chunk;
	unsigned long mask, pfn, pageno, start = 0;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
		 (void *)cma, nr_pages, align_pages);

	if (!nr_pages)
		return NULL;
	/*
	 * Align the mask to the chunk size; each bit in the bitmap tracks
	 * one chunk of 2^(KVM_CMA_CHUNK_ORDER - PAGE_SHIFT) pages.
	 */
	VM_BUG_ON(!is_power_of_2(align_pages));
	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);

	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	mutex_lock(&kvm_cma_mutex);
	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
						    start, nr_chunk, mask);
		if (pageno >= chunk_count)
			break;

		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, nr_chunk);
			page = pfn_to_page(pfn);
			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}
	mutex_unlock(&kvm_cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

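/*
 * Worked example and caller sketch (assumptions, for illustration):
 * with 4K pages and a KVM_CMA_CHUNK_ORDER of 18 (256K chunks), each
 * bitmap bit covers 2^(18 - 12) = 64 pages, so a 16 MB hash pagetable
 * request means nr_pages = 4096 and nr_chunk = 64.  The wrapper below
 * mirrors how a kvm_alloc_hpt()-style caller might use this allocator;
 * it is a sketch, not the exact upstream code.
 */
#if 0	/* usage sketch, not compiled */
struct page *example_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Older CPUs need the HPT aligned to a multiple of its size. */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;

	return kvm_alloc_cma(nr_pages, align_pages);
}
#endif
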
/**
 * kvm_release_cma() - release allocated pages for hash pagetable
 * @pages: Allocated pages.
 * @nr_pages: Number of allocated pages.
 *
 * This function releases memory allocated by kvm_alloc_cma().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
{
	unsigned long pfn;
	unsigned long nr_chunk;
	struct kvm_cma *cma = &kvm_cma_area;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	mutex_lock(&kvm_cma_mutex);
	bitmap_clear(cma->bitmap,
		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
		     nr_chunk);
	free_contig_range(pfn, nr_pages);
	mutex_unlock(&kvm_cma_mutex);

	return true;
}

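/*
 * Illustrative release sketch (not part of the original file): whoever
 * allocated the hash pagetable hands the same page pointer and page
 * count back when the guest's HPT is torn down.  The wrapper name is
 * hypothetical.
 */
#if 0	/* usage sketch, not compiled */
void example_release_hpt(struct page *page, unsigned long nr_pages)
{
	if (page && !kvm_release_cma(page, nr_pages))
		pr_err("example: pages not from the KVM CMA area\n");
}
#endif
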
static int __init kvm_cma_activate_area(unsigned long base_pfn,
					unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));
	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

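/*
 * Ordering note with a worked example (assuming 4K pages and 256K
 * chunks): the area itself is reserved from early boot via
 * kvm_cma_declare_contiguous(), but the chunk bitmap can only be
 * allocated once slab is up, so the activation below runs as a
 * core_initcall.  For a 16 GB reservation, cma->count = 4194304 pages,
 * chunk_count = 65536 bits and bitmap_size = BITS_TO_LONGS(65536) *
 * sizeof(long) = 8 KB on 64-bit.
 */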
static int __init kvm_cma_init_reserved_areas(void)
{
	int bitmap_size, ret;
	unsigned long chunk_count;
	struct kvm_cma *cma = &kvm_cma_area;

	pr_debug("%s()\n", __func__);
	if (!cma->count)
		return 0;
	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
	if (ret)
		goto error;
	return 0;

error:
	kfree(cma->bitmap);
	return ret;
}
core_initcall(kvm_cma_init_reserved_areas);