/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently an
 * unordered stack of free regions, so there is room for substantial
 * performance gains from a smarter free list, e.g. an RB-tree, at least under
 * heavy fragmentation.
 *
 * Aligned allocations could also be improved.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

/* Target size of the per-manager cache of pre-allocated nodes. */
#define MM_UNUSED_TARGET 4
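
/*
 * Allocate a drm_mm_node, falling back to the per-manager cache of
 * pre-allocated nodes (filled by drm_mm_pre_get()) when kzalloc() fails,
 * e.g. under memory pressure in atomic context.
 */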
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Tops up the cache of unused nodes so that subsequent atomic allocations
 * can be satisfied from it. Returns 0 on success or -ENOMEM if allocation
 * fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
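
/*
 * Example (a sketch, not code from this file): a driver that must carve
 * out blocks while holding a spinlock can top up the node cache first and
 * then use the atomic entry points from drm_mm.h. dev_priv->mm_lock is a
 * hypothetical caller-side lock.
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	node = drm_mm_search_free(mm, size, alignment, 0);
 *	if (node)
 *		node = drm_mm_get_block_atomic(node, size, alignment);
 *	spin_unlock(&dev_priv->mm_lock);
 */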
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	/* The child is handed out as allocated, so it goes on no free list. */
	INIT_LIST_HEAD(&child->free_stack);

	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	BUG_ON(cur->scanned_block || cur->scanned_prev_free
	       || cur->scanned_next_free);

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
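
/*
 * Check whether the hole [start, end) can hold a block of the given size
 * and alignment, accounting for the space wasted by aligning the start of
 * the allocation up to the next alignment boundary.
 */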
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted) {
		return 1;
	}

	return 0;
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_hole(entry->start, entry->start + entry->size,
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
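
/*
 * Typical non-atomic allocation sequence, sketched. drm_mm_get_block() is
 * the non-atomic wrapper around drm_mm_get_block_generic() provided by
 * drm_mm.h; best_match = 1 requests best-fit instead of first-fit.
 *
 *	node = drm_mm_search_free(mm, size, alignment, 1);
 *	if (!node)
 *		return -ENOSPC;
 *	node = drm_mm_get_block(node, size, alignment);
 */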
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		unsigned long adj_start = entry->start < start ?
			start : entry->start;
		unsigned long adj_end = entry->start + entry->size > end ?
			end : entry->start + entry->size;

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a suitable hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks++;

	prev_free = next_free = NULL;

	BUG_ON(node->free);
	node->scanned_block = 1;
	node->free = 1;

	if (node->node_list.prev != &mm->node_list) {
		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
				       node_list);

		if (prev_node->free) {
			list_del(&prev_node->node_list);

			node->start = prev_node->start;
			node->size += prev_node->size;

			prev_node->scanned_prev_free = 1;

			prev_free = &prev_node->free_stack;
		}
	}

	if (node->node_list.next != &mm->node_list) {
		next_node = list_entry(node->node_list.next, struct drm_mm_node,
				       node_list);

		if (next_node->free) {
			list_del(&next_node->node_list);

			node->size += next_node->size;

			next_node->scanned_next_free = 1;

			next_free = &next_node->free_stack;
		}
	}

	/* The free_stack list is not used for allocated objects, so these two
	 * pointers can be abused (as long as no allocations in this memory
	 * manager happen). */
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

	if (check_free_hole(node->start, node->start + node->size,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;
	node->free = 0;

	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
			       free_stack);
	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
			       free_stack);

	if (prev_node) {
		BUG_ON(!prev_node->scanned_prev_free);
		prev_node->scanned_prev_free = 0;

		list_add_tail(&prev_node->node_list, &node->node_list);

		node->start = prev_node->start + prev_node->size;
		node->size -= prev_node->size;
	}

	if (next_node) {
		BUG_ON(!next_node->scanned_next_free);
		next_node->scanned_next_free = 0;

		list_add(&next_node->node_list, &node->node_list);
		node->size -= next_node->size;
	}

	INIT_LIST_HEAD(&node->free_stack);

	/* Only need to check for containment because start and size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
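
/*
 * Sketch of an eviction loop built on the scan API (obj, lru, lru_link and
 * unbind() are hypothetical caller-side constructs):
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			goto found;
 *	}
 *	... no hole found; remove all added blocks again, in reverse ...
 *
 * found:
 *	Blocks must then be removed in the reverse of the order they were
 *	added; drm_mm_scan_remove_block() returns 1 exactly for the blocks
 *	whose eviction (i.e. unbind() putting their nodes back with
 *	drm_mm_put_block()) produces the hole.
 */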
/* Returns non-zero if the manager holds only its single initial node. */
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
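
/*
 * Lifecycle sketch for a manager covering a 16 MiB range starting at
 * offset 0 (the values are illustrative only):
 *
 *	struct drm_mm mm;
 *
 *	ret = drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *	if (ret)
 *		return ret;
 *	... get/put blocks ...
 *	drm_mm_takedown(&mm);	// all blocks must have been put back
 */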
void drm_mm_takedown(struct drm_mm * mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif