drm_vma_manager.c

/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care that regions do not overlap, sizes them appropriately and
 * does not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear. Please use VM_NONLINEAR in that case and implement
 * your own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return value (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 */
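
/*
 * Example: typical lifecycle of an offset manager (illustrative sketch only;
 * the page range and the obj structure with its embedded vma_node are
 * assumptions, not part of this file):
 *
 *	struct drm_vma_offset_manager mgr;
 *
 *	drm_vma_offset_manager_init(&mgr, 0x100000, 0x100000);
 *	...
 *	ret = drm_vma_offset_add(&mgr, &obj->vma_node,
 *				 obj->size >> PAGE_SHIFT);
 *	...
 *	drm_vma_offset_remove(&mgr, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&mgr);
 */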

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
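
/*
 * Example: a driver would typically embed the manager in its device structure
 * and initialize it at load time. The "drv" structure and the reserved page
 * range below are assumptions for illustration:
 *
 *	drm_vma_offset_manager_init(&drv->vma_manager,
 *				    0x100000000ULL >> PAGE_SHIFT,
 *				    0x100000000ULL >> PAGE_SHIFT);
 */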

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
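
/*
 * Example: teardown order matters; every node must be removed before the
 * manager is destroyed. The object list below is an assumed driver structure,
 * shown for illustration only:
 *
 *	list_for_each_entry(obj, &drv->obj_list, head)
 *		drm_vma_offset_remove(&drv->vma_manager, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&drv->vma_manager);
 */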

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the best
 * match for the given range. That is, @start may point anywhere into a valid
 * region and the containing node will be returned, as long as the node spans
 * the whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
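
/*
 * Example: lookup semantics, using hypothetical values. If a node was added
 * at page offset 0x2000 with a size of 0x10 pages, then:
 *
 *	node = drm_vma_offset_lookup(mgr, 0x2000, 0x10);
 *		returns the node (exact match)
 *	node = drm_vma_offset_lookup(mgr, 0x2008, 0x08);
 *		returns the node (range lies fully inside it)
 *	node = drm_vma_offset_lookup(mgr, 0x2008, 0x10);
 *		returns NULL (range extends past the node's end at 0x2010)
 */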

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
 * manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							  unsigned long start,
							  unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	/* descend the rb-tree, remembering the last node whose start offset
	 * lies at or below @start */
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
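
/*
 * Example: lookup under the manager lock so the node cannot be destroyed
 * between lookup and reference acquisition. The object layout (a kref
 * embedded next to the vma_node) is an assumption for illustration:
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get_unless_zero(&container_of(node, struct drv_obj,
 *						   vma_node)->ref);
 *	drm_vma_offset_unlock_lookup(mgr);
 */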

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			/* drm_mm never hands out the same offset twice, so a
			 * duplicate start address is a bug */
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
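
/*
 * Example: allocating a fake mmap offset for a buffer object. The object
 * layout follows the common GEM pattern but is an assumption made for this
 * sketch:
 *
 *	ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *				 obj->size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *		(byte-based offset to hand to user-space for mmap(2))
 */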

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
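
/*
 * Example: since removal of a never-added node is a no-op, unconditional
 * cleanup in the object's free path is safe (sketch, assuming the object
 * embeds its drm_vma_offset_node as "vma_node"):
 *
 *	drm_vma_offset_remove(mgr, &obj->vma_node);
 */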