radeon_mem.c
  1. /* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*-
  2. *
  3. * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
  4. *
  5. * The Weather Channel (TM) funded Tungsten Graphics to develop the
  6. * initial release of the Radeon 8500 driver under the XFree86 license.
  7. * This notice must be preserved.
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a
  10. * copy of this software and associated documentation files (the "Software"),
  11. * to deal in the Software without restriction, including without limitation
  12. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13. * and/or sell copies of the Software, and to permit persons to whom the
  14. * Software is furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice (including the next
  17. * paragraph) shall be included in all copies or substantial portions of the
  18. * Software.
  19. *
  20. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  23. * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  26. * DEALINGS IN THE SOFTWARE.
  27. *
  28. * Authors:
  29. * Keith Whitwell <keith@tungstengraphics.com>
  30. */
  31. #include "drmP.h"
  32. #include "drm.h"
  33. #include "radeon_drm.h"
  34. #include "radeon_drv.h"
  35. /* Very simple allocator for GART memory, working on a static range
  36. * already mapped into each client's address space.
  37. */
  38. static struct mem_block *split_block(struct mem_block *p, int start, int size,
  39. DRMFILE filp )
  40. {
  41. /* Maybe cut off the start of an existing block */
  42. if (start > p->start) {
  43. struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
  44. if (!newblock)
  45. goto out;
  46. newblock->start = start;
  47. newblock->size = p->size - (start - p->start);
  48. newblock->filp = NULL;
  49. newblock->next = p->next;
  50. newblock->prev = p;
  51. p->next->prev = newblock;
  52. p->next = newblock;
  53. p->size -= newblock->size;
  54. p = newblock;
  55. }
  56. /* Maybe cut off the end of an existing block */
  57. if (size < p->size) {
  58. struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
  59. if (!newblock)
  60. goto out;
  61. newblock->start = start + size;
  62. newblock->size = p->size - size;
  63. newblock->filp = NULL;
  64. newblock->next = p->next;
  65. newblock->prev = p;
  66. p->next->prev = newblock;
  67. p->next = newblock;
  68. p->size = size;
  69. }
  70. out:
  71. /* Our block is in the middle */
  72. p->filp = filp;
  73. return p;
  74. }
  75. static struct mem_block *alloc_block( struct mem_block *heap, int size,
  76. int align2, DRMFILE filp )
  77. {
  78. struct mem_block *p;
  79. int mask = (1 << align2)-1;
  80. list_for_each(p, heap) {
  81. int start = (p->start + mask) & ~mask;
  82. if (p->filp == 0 && start + size <= p->start + p->size)
  83. return split_block( p, start, size, filp );
  84. }
  85. return NULL;
  86. }
  87. static struct mem_block *find_block( struct mem_block *heap, int start )
  88. {
  89. struct mem_block *p;
  90. list_for_each(p, heap)
  91. if (p->start == start)
  92. return p;
  93. return NULL;
  94. }
  95. static void free_block( struct mem_block *p )
  96. {
  97. p->filp = NULL;
  98. /* Assumes a single contiguous range. Needs a special filp in
  99. * 'heap' to stop it being subsumed.
  100. */
  101. if (p->next->filp == 0) {
  102. struct mem_block *q = p->next;
  103. p->size += q->size;
  104. p->next = q->next;
  105. p->next->prev = p;
  106. drm_free(q, sizeof(*q), DRM_MEM_BUFS );
  107. }
  108. if (p->prev->filp == 0) {
  109. struct mem_block *q = p->prev;
  110. q->size += p->size;
  111. q->next = p->next;
  112. q->next->prev = q;
  113. drm_free(p, sizeof(*q), DRM_MEM_BUFS );
  114. }
  115. }
  116. /* Initialize. How to check for an uninitialized heap?
  117. */
  118. static int init_heap(struct mem_block **heap, int start, int size)
  119. {
  120. struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS );
  121. if (!blocks)
  122. return DRM_ERR(ENOMEM);
  123. *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS );
  124. if (!*heap) {
  125. drm_free( blocks, sizeof(*blocks), DRM_MEM_BUFS );
  126. return DRM_ERR(ENOMEM);
  127. }
  128. blocks->start = start;
  129. blocks->size = size;
  130. blocks->filp = NULL;
  131. blocks->next = blocks->prev = *heap;
  132. memset( *heap, 0, sizeof(**heap) );
  133. (*heap)->filp = (DRMFILE) -1;
  134. (*heap)->next = (*heap)->prev = blocks;
  135. return 0;
  136. }
  137. /* Free all blocks associated with the releasing file.
  138. */
  139. void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
  140. {
  141. struct mem_block *p;
  142. if (!heap || !heap->next)
  143. return;
  144. list_for_each(p, heap) {
  145. if (p->filp == filp)
  146. p->filp = NULL;
  147. }
  148. /* Assumes a single contiguous range. Needs a special filp in
  149. * 'heap' to stop it being subsumed.
  150. */
  151. list_for_each(p, heap) {
  152. while (p->filp == 0 && p->next->filp == 0) {
  153. struct mem_block *q = p->next;
  154. p->size += q->size;
  155. p->next = q->next;
  156. p->next->prev = p;
  157. drm_free(q, sizeof(*q),DRM_MEM_DRIVER);
  158. }
  159. }
  160. }
  161. /* Shutdown.
  162. */
  163. void radeon_mem_takedown( struct mem_block **heap )
  164. {
  165. struct mem_block *p;
  166. if (!*heap)
  167. return;
  168. for (p = (*heap)->next ; p != *heap ; ) {
  169. struct mem_block *q = p;
  170. p = p->next;
  171. drm_free(q, sizeof(*q),DRM_MEM_DRIVER);
  172. }
  173. drm_free( *heap, sizeof(**heap),DRM_MEM_DRIVER );
  174. *heap = NULL;
  175. }
  176. /* IOCTL HANDLERS */
  177. static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
  178. int region )
  179. {
  180. switch( region ) {
  181. case RADEON_MEM_REGION_GART:
  182. return &dev_priv->gart_heap;
  183. case RADEON_MEM_REGION_FB:
  184. return &dev_priv->fb_heap;
  185. default:
  186. return NULL;
  187. }
  188. }
  189. int radeon_mem_alloc( DRM_IOCTL_ARGS )
  190. {
  191. DRM_DEVICE;
  192. drm_radeon_private_t *dev_priv = dev->dev_private;
  193. drm_radeon_mem_alloc_t alloc;
  194. struct mem_block *block, **heap;
  195. if ( !dev_priv ) {
  196. DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
  197. return DRM_ERR(EINVAL);
  198. }
  199. DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
  200. sizeof(alloc) );
  201. heap = get_heap( dev_priv, alloc.region );
  202. if (!heap || !*heap)
  203. return DRM_ERR(EFAULT);
  204. /* Make things easier on ourselves: all allocations at least
  205. * 4k aligned.
  206. */
  207. if (alloc.alignment < 12)
  208. alloc.alignment = 12;
  209. block = alloc_block( *heap, alloc.size, alloc.alignment,
  210. filp );
  211. if (!block)
  212. return DRM_ERR(ENOMEM);
  213. if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
  214. sizeof(int) ) ) {
  215. DRM_ERROR( "copy_to_user\n" );
  216. return DRM_ERR(EFAULT);
  217. }
  218. return 0;
  219. }
  220. int radeon_mem_free( DRM_IOCTL_ARGS )
  221. {
  222. DRM_DEVICE;
  223. drm_radeon_private_t *dev_priv = dev->dev_private;
  224. drm_radeon_mem_free_t memfree;
  225. struct mem_block *block, **heap;
  226. if ( !dev_priv ) {
  227. DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
  228. return DRM_ERR(EINVAL);
  229. }
  230. DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
  231. sizeof(memfree) );
  232. heap = get_heap( dev_priv, memfree.region );
  233. if (!heap || !*heap)
  234. return DRM_ERR(EFAULT);
  235. block = find_block( *heap, memfree.region_offset );
  236. if (!block)
  237. return DRM_ERR(EFAULT);
  238. if (block->filp != filp)
  239. return DRM_ERR(EPERM);
  240. free_block( block );
  241. return 0;
  242. }
  243. int radeon_mem_init_heap( DRM_IOCTL_ARGS )
  244. {
  245. DRM_DEVICE;
  246. drm_radeon_private_t *dev_priv = dev->dev_private;
  247. drm_radeon_mem_init_heap_t initheap;
  248. struct mem_block **heap;
  249. if ( !dev_priv ) {
  250. DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
  251. return DRM_ERR(EINVAL);
  252. }
  253. DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
  254. sizeof(initheap) );
  255. heap = get_heap( dev_priv, initheap.region );
  256. if (!heap)
  257. return DRM_ERR(EFAULT);
  258. if (*heap) {
  259. DRM_ERROR("heap already initialized?");
  260. return DRM_ERR(EFAULT);
  261. }
  262. return init_heap( heap, initheap.start, initheap.size );
  263. }