qxl_release.c

/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
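
/*
 * Allocate a qxl_release tracking struct and register it in the device's
 * release idr. Returns the idr handle (>= 1) on success, 0 if the kmalloc
 * failed, or a negative errno if idr_alloc failed.
 */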
static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);
	int idr_ret;

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return 0;
	}
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	handle = idr_ret;
	if (idr_ret < 0) {
		kfree(release);	/* don't leak the release on idr failure */
		goto release_fail;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
release_fail:

	return handle;
}
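
/*
 * Tear down a release: drop any surface id it holds, detach it from the
 * fences of all buffer objects on its list, drop the bo references, then
 * remove it from the idr and free it.
 */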
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	struct qxl_bo_list *entry, *tmp;

	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
		QXL_INFO(qdev, "release %llx\n",
			 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
			 - DRM_FILE_OFFSET);
		qxl_fence_remove_release(&bo->fence, release->id);
		qxl_bo_unref(&bo);
		kfree(entry);
	}
	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);
	kfree(release);
}
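
/*
 * Allocate one page of pinned VRAM to suballocate release commands from.
 */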
static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release BOs: they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}
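
/*
 * Add a buffer object to the release's validation list, taking a reference
 * on it. Adding a bo that is already on the list is a no-op.
 */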
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}
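
/*
 * Make sure a reserved bo is resident and has a hardware surface id
 * before it is referenced from a command.
 */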
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}
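
/*
 * Reserve and validate every bo on the release's list. On any validation
 * failure the whole reservation is backed off and the error returned.
 */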
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object is on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}
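
/*
 * Undo the reservations taken by qxl_release_reserve_list().
 */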
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object is on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
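
/*
 * Allocate a release for a surface command. A destroy command reuses the
 * bo of the matching create command, stashing its release info 64 bytes
 * past the create command's offset; any other surface command gets a
 * fresh suballocation via qxl_alloc_release_reserved().
 */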
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}
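
/*
 * Allocate a release of the given type and suballocate room for its info
 * out of the current release bo for that type, allocating a new page-sized
 * bo when the current one is full. Optionally returns the backing bo in
 * *rbo.
 */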
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	qxl_release_list_add(*release, bo);

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	qxl_bo_unref(&bo);
	return ret;
}
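
/*
 * Look up a release by its idr handle. The idr spinlock is taken
 * internally; the returned pointer is only safe to use for as long as the
 * release cannot be freed underneath the caller.
 */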
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}
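
/*
 * Atomically map the release info inside the first bo on the release's
 * list. PAGE_SIZE (4096) is a single bit, so "offset & PAGE_SIZE" picks
 * the page within the bo and "offset & ~PAGE_SIZE" the offset inside that
 * page; release offsets are expected to stay below two pages here.
 */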
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}
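
/*
 * Undo qxl_release_map(): recover the page pointer from the info pointer
 * and drop the atomic mapping.
 */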
void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
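
/*
 * Attach this release's fence to every reserved bo on its list, put the
 * bos back on the LRU, and drop the reservations taken by
 * qxl_release_reserve_list().
 */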
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;

	/* if only one object is on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		if (!entry->bo->sync_obj)
			entry->bo->sync_obj = &qbo->fence;

		qxl_fence_add_release_locked(&qbo->fence, release->id);

		ttm_bo_add_to_lru(bo);
		ww_mutex_unlock(&bo->resv->lock);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}