radeon_ring.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */
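/*
 * Allocate an IB from the pool. A fence is created for the new IB, then
 * either a free pool slot is taken or, if the whole pool is in use, the
 * oldest scheduled IB's fence is waited on and that slot is recycled.
 * On success *ib points to an IB with the fence attached; on failure
 * *ib is NULL and the fence is released.
 */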
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	unsigned long i;
	int r = 0;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		DRM_ERROR("failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (i < RADEON_IB_POOL_SIZE) {
		set_bit(i, rdev->ib_pool.alloc_bm);
		rdev->ib_pool.ibs[i].length_dw = 0;
		*ib = &rdev->ib_pool.ibs[i];
		mutex_unlock(&rdev->ib_pool.mutex);
		goto out;
	}
	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
		/* nothing we can do here: the pool is full and no IB is
		 * scheduled, so there is nothing to recycle */
		mutex_unlock(&rdev->ib_pool.mutex);
		DRM_ERROR("all IBs allocated, none scheduled.\n");
		r = -EINVAL;
		goto out;
	}
	/* get the first IB on the scheduled list */
	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
			 struct radeon_ib, list);
	if (nib->fence == NULL) {
		/* nothing we can do here: without a fence we cannot wait
		 * for this IB to complete and recycle it */
		mutex_unlock(&rdev->ib_pool.mutex);
		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
		r = -EINVAL;
		goto out;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	r = radeon_fence_wait(nib->fence, false);
	if (r) {
		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
			  (unsigned long)nib->gpu_addr, nib->length_dw);
		DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
		goto out;
	}
	radeon_fence_unref(&nib->fence);
	nib->length_dw = 0;
	/* the scheduled list is modified here, so retake the pool mutex */
	mutex_lock(&rdev->ib_pool.mutex);
	list_del(&nib->list);
	INIT_LIST_HEAD(&nib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
out:
	if (r) {
		radeon_fence_unref(&fence);
	} else {
		(*ib)->fence = fence;
	}
	return r;
}

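/*
 * Return an IB to the pool. An IB that is still on the scheduled list with
 * an unsignaled fence is left alone (it will be recycled by radeon_ib_get());
 * otherwise the fence is released and the pool slot is cleared.
 */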
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
		/* IB is still scheduled and its fence has not signaled,
		 * so don't touch it */
		mutex_unlock(&rdev->ib_pool.mutex);
		return;
	}
	list_del(&tmp->list);
	INIT_LIST_HEAD(&tmp->list);
	if (tmp->fence)
		radeon_fence_unref(&tmp->fence);
	tmp->length_dw = 0;
	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
	mutex_unlock(&rdev->ib_pool.mutex);
}

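/*
 * Submit an IB to the CP ring: lock the ring, emit the indirect buffer
 * packet and the IB's fence, put the IB on the scheduled list and commit
 * the new write pointer.
 */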
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: empty IB or CP not ready; nothing useful to report yet */
		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
		return -EINVAL;
	}
	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

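/*
 * Create the IB pool: a single pinned and mapped GTT buffer object carved
 * into RADEON_IB_POOL_SIZE chunks of 64KB, each chunk backing one radeon_ib
 * with its GPU address and CPU pointer precomputed.
 */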
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	/* Allocate 1M object buffer */
	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
				 true, RADEON_GEM_DOMAIN_GTT,
				 false, &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
	}
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

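/*
 * Tear down the IB pool: clear the allocation bitmap and unmap and release
 * the backing buffer object.
 */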
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (rdev->ib_pool.robj) {
		radeon_object_kunmap(rdev->ib_pool.robj);
		radeon_object_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * Ring.
 */
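/*
 * Refresh the cached read pointer from the hardware and recompute how many
 * dwords are free between the write pointer and the read pointer.
 */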
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_R600)
		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	else
		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

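/*
 * Lock the ring and reserve ndw dwords, rounded up to the fetch alignment.
 * If there is not enough free space, wait on fences until enough of the
 * ring has been consumed by the CP.
 */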
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r) {
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

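/*
 * Pad the ring up to the fetch alignment with filler dwords (CP type-2
 * packets), commit the new write pointer to the hardware and release the
 * ring mutex.
 */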
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

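/*
 * Abandon a pending submission: restore the saved write pointer and release
 * the ring mutex without committing anything.
 */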
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

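/*
 * Allocate, pin and map the ring buffer object in GTT (if not already done)
 * and initialize the pointer mask and free dword count for a ring of
 * ring_size bytes.
 */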
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false,
					 &rdev->cp.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
		r = radeon_object_pin(rdev->cp.ring_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->cp.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
		r = radeon_object_kmap(rdev->cp.ring_obj,
				       (void **)&rdev->cp.ring);
		if (r) {
			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

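/*
 * Unmap, unpin and free the ring buffer object.
 */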
void radeon_ring_fini(struct radeon_device *rdev)
{
	mutex_lock(&rdev->cp.mutex);
	if (rdev->cp.ring_obj) {
		radeon_object_kunmap(rdev->cp.ring_obj);
		radeon_object_unpin(rdev->cp.ring_obj);
		radeon_object_unref(&rdev->cp.ring_obj);
		rdev->cp.ring = NULL;
		rdev->cp.ring_obj = NULL;
	}
	mutex_unlock(&rdev->cp.mutex);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
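/*
 * Debugfs show callback for a single IB: print its index, fence pointer,
 * size in dwords and every dword it contains.
 */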
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04lu\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

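/*
 * Register one debugfs file per IB pool slot so the content of each IB can
 * be inspected; does nothing when CONFIG_DEBUG_FS is disabled.
 */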
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}