/* qxl_release.c */
  1. /*
  2. * Copyright 2011 Red Hat, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * on the rights to use, copy, modify, merge, publish, distribute, sub
  8. * license, and/or sell copies of the Software, and to permit persons to whom
  9. * the Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  19. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  20. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  21. */
  22. #include "qxl_drv.h"
  23. #include "qxl_object.h"
  24. /*
  25. * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
  26. * into 256 byte chunks for now - gives 16 cmds per page.
  27. *
  28. * use an ida to index into the chunks?
  29. */
  30. /* manage releaseables */
  31. /* stack them 16 high for now -drawable object is 191 */
  32. #define RELEASE_SIZE 256
  33. #define RELEASES_PER_BO (4096 / RELEASE_SIZE)
  34. /* put an alloc/dealloc surface cmd into one bo and round up to 128 */
  35. #define SURFACE_RELEASE_SIZE 128
  36. #define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
  37. static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
  38. static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
  39. static uint64_t
  40. qxl_release_alloc(struct qxl_device *qdev, int type,
  41. struct qxl_release **ret)
  42. {
  43. struct qxl_release *release;
  44. int handle;
  45. size_t size = sizeof(*release);
  46. int idr_ret;
  47. release = kmalloc(size, GFP_KERNEL);
  48. if (!release) {
  49. DRM_ERROR("Out of memory\n");
  50. return 0;
  51. }
  52. release->type = type;
  53. release->release_offset = 0;
  54. release->surface_release_id = 0;
  55. INIT_LIST_HEAD(&release->bos);
  56. idr_preload(GFP_KERNEL);
  57. spin_lock(&qdev->release_idr_lock);
  58. idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
  59. spin_unlock(&qdev->release_idr_lock);
  60. idr_preload_end();
  61. handle = idr_ret;
  62. if (idr_ret < 0)
  63. goto release_fail;
  64. *ret = release;
  65. QXL_INFO(qdev, "allocated release %lld\n", handle);
  66. release->id = handle;
  67. release_fail:
  68. return handle;
  69. }
  70. void
  71. qxl_release_free(struct qxl_device *qdev,
  72. struct qxl_release *release)
  73. {
  74. struct qxl_bo_list *entry, *tmp;
  75. QXL_INFO(qdev, "release %d, type %d\n", release->id,
  76. release->type);
  77. if (release->surface_release_id)
  78. qxl_surface_id_dealloc(qdev, release->surface_release_id);
  79. list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
  80. struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
  81. QXL_INFO(qdev, "release %llx\n",
  82. drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
  83. - DRM_FILE_OFFSET);
  84. qxl_fence_remove_release(&bo->fence, release->id);
  85. qxl_bo_unref(&bo);
  86. }
  87. spin_lock(&qdev->release_idr_lock);
  88. idr_remove(&qdev->release_idr, release->id);
  89. spin_unlock(&qdev->release_idr_lock);
  90. kfree(release);
  91. }
  92. static int qxl_release_bo_alloc(struct qxl_device *qdev,
  93. struct qxl_bo **bo)
  94. {
  95. int ret;
  96. /* pin releases bo's they are too messy to evict */
  97. ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
  98. QXL_GEM_DOMAIN_VRAM, NULL,
  99. bo);
  100. return ret;
  101. }
  102. int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
  103. {
  104. struct qxl_bo_list *entry;
  105. list_for_each_entry(entry, &release->bos, tv.head) {
  106. if (entry->tv.bo == &bo->tbo)
  107. return 0;
  108. }
  109. entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
  110. if (!entry)
  111. return -ENOMEM;
  112. qxl_bo_ref(bo);
  113. entry->tv.bo = &bo->tbo;
  114. list_add_tail(&entry->tv.head, &release->bos);
  115. return 0;
  116. }
  117. static int qxl_release_validate_bo(struct qxl_bo *bo)
  118. {
  119. int ret;
  120. if (!bo->pin_count) {
  121. qxl_ttm_placement_from_domain(bo, bo->type, false);
  122. ret = ttm_bo_validate(&bo->tbo, &bo->placement,
  123. true, false);
  124. if (ret)
  125. return ret;
  126. }
  127. /* allocate a surface for reserved + validated buffers */
  128. ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
  129. if (ret)
  130. return ret;
  131. return 0;
  132. }
/*
 * qxl_release_reserve_list() - reserve and validate every BO on the release.
 * @release: release whose BO list is to be reserved
 * @no_intr: NOTE(review): currently unused in this function — TODO confirm
 *           whether it should be forwarded to the reservation call.
 *
 * Uses ttm_eu_reserve_buffers() (ww-mutex based) to reserve all BOs, then
 * validates each one.  On validation failure all reservations are backed
 * off before returning the error.  Returns 0 on success.
 */
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release its the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			/* undo the ww-mutex reservations taken above */
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}
  154. void qxl_release_backoff_reserve_list(struct qxl_release *release)
  155. {
  156. /* if only one object on the release its the release itself
  157. since these objects are pinned no need to reserve */
  158. if (list_is_singular(&release->bos))
  159. return;
  160. ttm_eu_backoff_reservation(&release->ticket, &release->bos);
  161. }
  162. int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
  163. enum qxl_surface_cmd_type surface_cmd_type,
  164. struct qxl_release *create_rel,
  165. struct qxl_release **release)
  166. {
  167. if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
  168. int idr_ret;
  169. struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
  170. struct qxl_bo *bo;
  171. union qxl_release_info *info;
  172. /* stash the release after the create command */
  173. idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
  174. bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
  175. (*release)->release_offset = create_rel->release_offset + 64;
  176. qxl_release_list_add(*release, bo);
  177. info = qxl_release_map(qdev, *release);
  178. info->id = idr_ret;
  179. qxl_release_unmap(qdev, *release, info);
  180. qxl_bo_unref(&bo);
  181. return 0;
  182. }
  183. return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
  184. QXL_RELEASE_SURFACE_CMD, release, NULL);
  185. }
  186. int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
  187. int type, struct qxl_release **release,
  188. struct qxl_bo **rbo)
  189. {
  190. struct qxl_bo *bo;
  191. int idr_ret;
  192. int ret = 0;
  193. union qxl_release_info *info;
  194. int cur_idx;
  195. if (type == QXL_RELEASE_DRAWABLE)
  196. cur_idx = 0;
  197. else if (type == QXL_RELEASE_SURFACE_CMD)
  198. cur_idx = 1;
  199. else if (type == QXL_RELEASE_CURSOR_CMD)
  200. cur_idx = 2;
  201. else {
  202. DRM_ERROR("got illegal type: %d\n", type);
  203. return -EINVAL;
  204. }
  205. idr_ret = qxl_release_alloc(qdev, type, release);
  206. mutex_lock(&qdev->release_mutex);
  207. if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
  208. qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
  209. qdev->current_release_bo_offset[cur_idx] = 0;
  210. qdev->current_release_bo[cur_idx] = NULL;
  211. }
  212. if (!qdev->current_release_bo[cur_idx]) {
  213. ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
  214. if (ret) {
  215. mutex_unlock(&qdev->release_mutex);
  216. return ret;
  217. }
  218. }
  219. bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
  220. (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
  221. qdev->current_release_bo_offset[cur_idx]++;
  222. if (rbo)
  223. *rbo = bo;
  224. mutex_unlock(&qdev->release_mutex);
  225. qxl_release_list_add(*release, bo);
  226. info = qxl_release_map(qdev, *release);
  227. info->id = idr_ret;
  228. qxl_release_unmap(qdev, *release, info);
  229. qxl_bo_unref(&bo);
  230. return ret;
  231. }
  232. struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
  233. uint64_t id)
  234. {
  235. struct qxl_release *release;
  236. spin_lock(&qdev->release_idr_lock);
  237. release = idr_find(&qdev->release_idr, id);
  238. spin_unlock(&qdev->release_idr_lock);
  239. if (!release) {
  240. DRM_ERROR("failed to find id in release_idr\n");
  241. return NULL;
  242. }
  243. return release;
  244. }
/*
 * qxl_release_map() - atomically map the release's info structure.
 * @qdev: qxl device
 * @release: release whose first BO backs the info structure
 *
 * Maps the page of the first tracked BO containing release_offset and
 * returns a pointer to the info structure within it, or NULL on map
 * failure.  Must be paired with qxl_release_unmap().
 *
 * NOTE(review): both masks use PAGE_SIZE rather than PAGE_MASK, so this
 * only selects page 0 or page 1 correctly while release_offset stays below
 * 2 * PAGE_SIZE — which the suballocator appears to guarantee (offsets are
 * at most one chunk past a page boundary).  TODO confirm before reusing
 * this pattern with larger offsets.
 */
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	/* offset of the info struct within the mapped page */
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}
/*
 * qxl_release_unmap() - undo a qxl_release_map().
 * @qdev: qxl device
 * @release: release that was mapped
 * @info: pointer previously returned by qxl_release_map()
 *
 * Recovers the page base from @info by stripping the same in-page offset
 * that qxl_release_map() added (note the matching PAGE_SIZE — not
 * PAGE_MASK — masking), then drops the atomic mapping.
 */
void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
/*
 * qxl_release_fence_buffer_objects() - fence and unreserve all BOs on the
 * release after the command has been submitted.
 * @release: release whose reserved BO list is to be fenced
 *
 * For every tracked BO: attach the qxl fence as the TTM sync object if none
 * is set, record this release's id on the fence, put the BO back on the
 * LRU, and release its ww-mutex reservation.  Finally the ww acquire
 * context taken in qxl_release_reserve_list() is finished.
 *
 * Locking: lru_lock is taken before fence_lock and both are held across
 * the whole walk — NOTE(review): this ordering presumably matches the
 * TTM convention of the era; confirm against the ttm_execbuf_util code.
 */
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;

	/* if only one object on the release its the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	/* all BOs share one bdev/glob; grab them from the first entry */
	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		/* only install our fence if no sync object is set yet */
		if (!entry->bo->sync_obj)
			entry->bo->sync_obj = &qbo->fence;

		qxl_fence_add_release_locked(&qbo->fence, release->id);

		ttm_bo_add_to_lru(bo);
		/* drop the per-BO reservation taken via ttm_eu_reserve_buffers */
		ww_mutex_unlock(&bo->resv->lock);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	/* end the ww acquire context begun at reservation time */
	ww_acquire_fini(&release->ticket);
}