drm_mm.c

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
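/*
 * A minimal usage sketch (illustrative only; the size values below are
 * made up, and the caller is assumed to serialize access to the manager,
 * since only the unused-node pool is locked internally):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *
 *	hole = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (hole) {
 *		node = drm_mm_get_block_generic(hole, 4096, 0, 0);
 *		...
 *		drm_mm_put_block(node);
 *	}
 *
 *	drm_mm_takedown(&mm);
 */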
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
/*
 * Allocate a node, falling back to the pre-allocated unused-node pool if
 * kmalloc() fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/**
 * drm_mm_pre_get - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
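/*
 * Sketch of the intended calling pattern (illustrative; 'my_lock', 'hole',
 * 'size' and 'align' are hypothetical): top up the pool while sleeping is
 * still allowed, then allocate with atomic == 1 under a spinlock, so a
 * failed GFP_ATOMIC kmalloc() in drm_mm_kmalloc() can fall back to the
 * pre-allocated nodes instead of returning NULL:
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&my_lock);
 *	node = drm_mm_get_block_generic(hole, size, align, 1);
 *	spin_unlock(&my_lock);
 */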
/* Append a free node covering [start, start + size) to the manager. */
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}
/*
 * Carve 'size' bytes off the start of 'parent', returning them as a new,
 * non-free node. 'parent' keeps the remainder and stays on the free stack.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	/*
	 * If the hole is misaligned, split off the bytes up to the next
	 * aligned boundary first; they are returned to the free stack below.
	 */
	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
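/*
 * Worked example (hypothetical numbers): allocating 8 bytes with
 * alignment 4 from a free node at start 6, size 14, first splits off the
 * 2 bytes 6-8 (returned to the free stack via drm_mm_put_block()), then
 * splits the 8 bytes 8-16 off as the allocated node, leaving 16-20 free.
 */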
/*
 * Like drm_mm_get_block_generic(), but bytes of the hole before 'start'
 * (and any alignment waste) are first split off and returned to the free
 * stack.
 */
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	/* Absorb this block into a free predecessor, if there is one. */
	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: fold the successor
				 * into the predecessor and recycle its node. */
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
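/*
 * Merge illustration (hypothetical layout): with a free predecessor and a
 * free successor, putting back the used middle block coalesces all three
 * into a single free node, and the two surplus node structures are
 * recycled onto the unused-node pool (or freed once it already holds
 * MM_UNUSED_TARGET entries):
 *
 *	[free 0-4][used 4-8][free 8-12]  ->  [free 0-12]
 */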
/*
 * First-fit scan of the free stack; with best_match set, the whole stack
 * is scanned and the smallest hole that fits is returned instead.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
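/*
 * Typical search-then-allocate pattern (illustrative sketch; 'mm', 'size'
 * and 'align' are assumed to come from the caller). best_match == 1 picks
 * the smallest fitting hole rather than the first one found:
 *
 *	struct drm_mm_node *hole, *node;
 *
 *	hole = drm_mm_search_free(mm, size, align, 1);
 *	if (hole == NULL)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, align, 0);
 *	if (node == NULL)
 *		return -ENOMEM;
 */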
/*
 * Like drm_mm_search_free(), but only holes that can satisfy the request
 * inside [start, end) are considered.
 */
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted &&
		    (entry->start + wasted + size) <= end) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/* Return nonzero if the manager contains only its initial free node. */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	/* A clean manager holds exactly one node: the initial free block. */
	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif