/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
/*
 * Allocate a node, falling back to the pre-allocated cache on
 * mm->unused_nodes when a fresh (possibly atomic) allocation fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}
/**
 * drm_mm_pre_get - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Tops up the cache of unused nodes to MM_UNUSED_TARGET entries so that a
 * later atomic allocation cannot fail.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
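
/*
 * Example (an illustrative sketch, not code from this file): a caller that
 * must allocate while holding a spinlock can top up the node cache first and
 * then use the atomic wrappers from drm_mm.h. The lock and the "hole"/"node"
 * variables below are hypothetical.
 *
 *      if (drm_mm_pre_get(mm))
 *              return -ENOMEM;
 *
 *      spin_lock(&my_lock);
 *      hole = drm_mm_search_free(mm, size, alignment, 0);
 *      if (hole)
 *              node = drm_mm_get_block_atomic(hole, size, alignment);
 *      spin_unlock(&my_lock);
 */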
/* A hole starts right after the node that embeds it and runs up to the start
 * of the next node in address order. */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        if (alignment)
                tmp = hole_start % alignment;

        if (!tmp) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        } else
                wasted = alignment - tmp;

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *node;

        BUG_ON(!hole_node->hole_follows);

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        if (hole_start < start)
                wasted += start - hole_start;
        if (alignment)
                tmp = (hole_start + wasted) % alignment;

        if (tmp)
                wasted += alignment - tmp;

        if (!wasted) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        }

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);
        BUG_ON(node->start + node->size > end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        BUG_ON(!hole_node->hole_follows);

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block back. The freed range is merged into the implicit hole
 * tracking: the predecessor's hole is created or extended to cover it, and
 * the node struct itself is cached on the unused list (or freed once the
 * cache is full).
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                       == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                       != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
/* Does the hole [start, end) fit a block of the given size and alignment? */
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        unsigned wasted = 0;

        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        if (end >= start + size + wasted) {
                return 1;
        }

        return 0;
}
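
/*
 * Worked example (hypothetical numbers): for a hole spanning
 * [0x1000, 0x3000), a request of size 0x1800 with alignment 0x1000 fits:
 * 0x1000 is already aligned (wasted == 0) and 0x1000 + 0x1800 <= 0x3000.
 * If the hole instead started at 0x1400 with alignment 0x0800, wasted would
 * be 0x0800 - (0x1400 % 0x0800) = 0x0400.
 */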
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(drm_mm_hole_node_start(entry),
                                     drm_mm_hole_node_end(entry),
                                     size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
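
/*
 * Example (an illustrative sketch): the usual two-step allocation. A hole is
 * looked up first and then turned into an allocated block via the
 * drm_mm_get_block() wrapper from drm_mm.h; the caller must serialize
 * against other manipulations of the same drm_mm. Variable names are
 * hypothetical.
 *
 *      struct drm_mm_node *hole, *node;
 *
 *      hole = drm_mm_search_free(mm, size, alignment, 0);
 *      if (!hole)
 *              return -ENOSPC;
 *      node = drm_mm_get_block(hole, size, alignment);
 *      if (!node)
 *              return -ENOMEM;
 */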
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. A typical eviction loop built on these helpers is sketched
 * after drm_mm_scan_remove_block() below.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
                      unsigned alignment)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Initialize LRU scanning, restricted to the range [start, end).
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
                                 unsigned alignment,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a suitable hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        /* The node is unlinked, but its prev pointer is kept so that
         * drm_mm_scan_remove_block() can splice it back in. */
        node->node_list.prev = &prev_node->node_list;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                adj_start = hole_start < mm->scan_start ?
                        mm->scan_start : hole_start;
                adj_end = hole_end > mm->scan_end ?
                        mm->scan_end : hole_end;
        } else {
                adj_start = hole_start;
                adj_end = hole_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                /* Store start & size of the complete hole so that
                 * drm_mm_scan_remove_block() can do its containment check. */
                mm->scan_hit_size = hole_end - hole_start;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the reverse order from the scan list as they
 * have been added (the restoration below relies on the saved prev pointer
 * still being valid), otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size of the
         * complete resulting free block (not just the desired part) are
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                    <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
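
/*
 * Example (an illustrative sketch of an eviction loop, loosely modelled on
 * how a driver would use this API; the LRU list, "obj->mm_node", "scan_link"
 * and the evict() helper are all hypothetical). Blocks are speculatively
 * added until a hole is found and then removed again in reverse order of
 * addition; only blocks for which drm_mm_scan_remove_block() returns 1
 * actually have to be evicted.
 *
 *      drm_mm_init_scan(mm, size, alignment);
 *      list_for_each_entry(obj, &lru_list, lru) {
 *              list_add(&obj->scan_link, &unwind_list);
 *              if (drm_mm_scan_add_block(obj->mm_node))
 *                      goto found;
 *      }
 *      ... no hole found: unwind the whole list and bail out ...
 * found:
 *      // unwind_list was built with list_add(), so walking it forward
 *      // removes blocks in reverse order of addition, as required.
 *      list_for_each_entry_safe(obj, tmp, &unwind_list, scan_link) {
 *              if (drm_mm_scan_remove_block(obj->mm_node))
 *                      evict(obj);
 *              list_del(&obj->scan_link);
 *      }
 */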
/* Returns non-zero when no nodes are allocated, i.e. when only the embedded
 * head_node remains on the node list (matching drm_mm_takedown's check). */
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return list_empty(head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking:
         * head_node gets a "negative" size so that the hole it precedes
         * spans exactly [start, start + size). */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);
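
/*
 * Worked example of the head-node trick above (hypothetical values): with
 * start = 0x1000 and size = 0x2000, head_node.start = 0x3000 and
 * head_node.size wraps to -0x2000 modulo 2^BITS_PER_LONG, so
 * drm_mm_hole_node_start(&mm->head_node) = 0x3000 + (-0x2000) = 0x1000 and,
 * while the manager is empty, drm_mm_hole_node_end() loops back around to
 * head_node.start = 0x3000. The initial hole therefore covers exactly
 * [0x1000, 0x3000) with no special case for the empty manager.
 */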
void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                       prefix, hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                       prefix, entry->start, entry->start + entry->size,
                       entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                               prefix, hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
               total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                           hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                           entry->start, entry->start + entry->size,
                           entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        /* Report this entry's own trailing hole, not the
                         * head node's. */
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                   hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
                   total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif