/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
  27. static inline void
  28. region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
  29. {
  30. list_del(&a->nl_entry);
  31. list_del(&a->fl_entry);
  32. kfree(a);
  33. }
  34. static struct nouveau_mm_node *
  35. region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
  36. {
  37. struct nouveau_mm_node *b;
  38. if (a->length == size)
  39. return a;
  40. b = kmalloc(sizeof(*b), GFP_KERNEL);
  41. if (unlikely(b == NULL))
  42. return NULL;
  43. b->offset = a->offset;
  44. b->length = size;
  45. b->free = a->free;
  46. b->type = a->type;
  47. a->offset += size;
  48. a->length -= size;
  49. list_add_tail(&b->nl_entry, &a->nl_entry);
  50. if (b->free)
  51. list_add_tail(&b->fl_entry, &a->fl_entry);
  52. return b;
  53. }
  54. static struct nouveau_mm_node *
  55. nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
  56. {
  57. struct nouveau_mm_node *prev, *next;
  58. /* try to merge with free adjacent entries of same type */
  59. prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
  60. if (this->nl_entry.prev != &rmm->nodes) {
  61. if (prev->free && prev->type == this->type) {
  62. prev->length += this->length;
  63. region_put(rmm, this);
  64. this = prev;
  65. }
  66. }
  67. next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
  68. if (this->nl_entry.next != &rmm->nodes) {
  69. if (next->free && next->type == this->type) {
  70. next->offset = this->offset;
  71. next->length += this->length;
  72. region_put(rmm, this);
  73. this = next;
  74. }
  75. }
  76. return this;
  77. }
/* nouveau_mm_put() - return an allocated node to the allocator.
 * @rmm:  allocator instance
 * @this: node previously handed out by nouveau_mm_get(); ownership
 *        returns to the allocator (the node may be merged/freed here).
 *
 * Marks the node free, coalesces it with adjacent free nodes of the same
 * type, then strips the type tag from any whole block_size-aligned blocks
 * within it so they can satisfy future allocations of any type.
 * NOTE(review): no locking here — presumably callers hold rmm->mutex;
 * confirm against call sites.
 */
void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
	u32 block_s, block_l;

	this->free = true;
	list_add(&this->fl_entry, &rmm->free);
	this = nouveau_mm_merge(rmm, this);

	/* any entirely free blocks now? we'll want to remove typing
	 * on them now so they can be use for any memory allocation
	 */
	block_s = roundup(this->offset, rmm->block_size);
	if (block_s + rmm->block_size > this->offset + this->length)
		return; /* no whole aligned block inside this region */

	/* split off any still-typed region at the start */
	if (block_s != this->offset) {
		/* region_split() returns the leading (still-typed) piece,
		 * which stays free on the lists; `this` now starts at
		 * block_s.  On OOM just leave the region typed.
		 */
		if (!region_split(rmm, this, block_s - this->offset))
			return;
	}

	/* split off the soon-to-be-untyped block(s) */
	block_l = rounddown(this->length, rmm->block_size);
	if (block_l != this->length) {
		/* keep the leading, block-aligned piece as `this` */
		this = region_split(rmm, this, block_l);
		if (!this)
			return;
	}

	/* mark as having no type, and retry merge with any adjacent
	 * untyped blocks
	 */
	this->type = 0;
	nouveau_mm_merge(rmm, this);
}
/* nouveau_mm_get() - allocate a span from the allocator.
 * @rmm:     allocator instance
 * @type:    type tag the allocation must carry (non-zero)
 * @size:    desired size, in allocator units
 * @size_nc: if non-zero, the allocation may be shrunk to any non-zero
 *           multiple of this value when a full @size span is unavailable
 * @align:   required start alignment; assumed to be a power of two (the
 *           mask arithmetic below relies on it) — TODO confirm callers
 * @pnode:   receives the allocated node on success
 *
 * Returns 0 with *@pnode set, or -ENOMEM when no suitable free region
 * exists or an internal split fails.  NOTE(review): no locking here —
 * presumably callers hold rmm->mutex; confirm against call sites.
 */
int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
	       u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *this, *tmp, *next;
	u32 splitoff, avail, alloc;

	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
		/* address-order successor, or NULL at the end of the list */
		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
		if (this->nl_entry.next == &rmm->nodes)
			next = NULL;

		/* skip wrongly typed blocks */
		if (this->type && this->type != type)
			continue;

		/* account for alignment */
		splitoff = this->offset & (align - 1);
		if (splitoff)
			splitoff = align - splitoff;
		if (this->length <= splitoff)
			continue;

		/* determine total memory available from this, and
		 * the next block (if appropriate)
		 */
		avail = this->length;
		if (next && next->free && (!next->type || next->type == type))
			avail += next->length;
		avail -= splitoff;

		/* determine allocation size */
		if (size_nc) {
			/* partial allocations allowed: take what fits,
			 * rounded down to the size_nc granularity
			 */
			alloc = min(avail, size);
			alloc = rounddown(alloc, size_nc);
			if (alloc == 0)
				continue;
		} else {
			alloc = size;
			if (avail < alloc)
				continue;
		}

		/* untyped block, split off a chunk that's a multiple
		 * of block_size and type it
		 */
		if (!this->type) {
			u32 block = roundup(alloc + splitoff, rmm->block_size);
			if (this->length < block)
				continue;
			/* region_split() returns the leading piece; the
			 * remainder stays untyped on the free list
			 */
			this = region_split(rmm, this, block);
			if (!this)
				return -ENOMEM;
			this->type = type;
		}

		/* stealing memory from adjacent block */
		if (alloc > this->length) {
			/* alloc > this->length can only hold when `avail`
			 * included `next` above, so next is non-NULL here
			 */
			u32 amount = alloc - (this->length - splitoff);
			if (!next->type) {
				/* type a block_size-aligned chunk of next */
				amount = roundup(amount, rmm->block_size);
				next = region_split(rmm, next, amount);
				if (!next)
					return -ENOMEM;
				next->type = type;
			}
			/* grow `this` into next's space */
			this->length += amount;
			next->offset += amount;
			next->length -= amount;
			if (!next->length) {
				list_del(&next->nl_entry);
				list_del(&next->fl_entry);
				kfree(next);
			}
		}

		/* drop the alignment padding back onto the free list */
		if (splitoff) {
			if (!region_split(rmm, this, splitoff))
				return -ENOMEM;
		}

		/* carve the final allocation off the (now aligned) front */
		this = region_split(rmm, this, alloc);
		if (this == NULL)
			return -ENOMEM;

		this->free = false;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}
	return -ENOMEM;
}
  191. int
  192. nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
  193. {
  194. struct nouveau_mm *rmm;
  195. struct nouveau_mm_node *heap;
  196. heap = kzalloc(sizeof(*heap), GFP_KERNEL);
  197. if (!heap)
  198. return -ENOMEM;
  199. heap->free = true;
  200. heap->offset = roundup(offset, block);
  201. heap->length = rounddown(offset + length, block) - heap->offset;
  202. rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
  203. if (!rmm) {
  204. kfree(heap);
  205. return -ENOMEM;
  206. }
  207. rmm->block_size = block;
  208. mutex_init(&rmm->mutex);
  209. INIT_LIST_HEAD(&rmm->nodes);
  210. INIT_LIST_HEAD(&rmm->free);
  211. list_add(&heap->nl_entry, &rmm->nodes);
  212. list_add(&heap->fl_entry, &rmm->free);
  213. *prmm = rmm;
  214. return 0;
  215. }
  216. int
  217. nouveau_mm_fini(struct nouveau_mm **prmm)
  218. {
  219. struct nouveau_mm *rmm = *prmm;
  220. struct nouveau_mm_node *heap =
  221. list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
  222. if (!list_is_singular(&rmm->nodes))
  223. return -EBUSY;
  224. kfree(heap);
  225. kfree(rmm);
  226. *prmm = NULL;
  227. return 0;
  228. }