uvd_v1_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/**
 * uvd_v1_0_get_rptr - get read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware read pointer
 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_RPTR);
}

/**
 * uvd_v1_0_get_wptr - get write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware write pointer
 */
uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_WPTR);
}

/**
 * uvd_v1_0_set_wptr - set write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Commits the write pointer to the hardware
 */
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
}

/**
 * uvd_v1_0_init - start and test UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
int uvd_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	radeon_set_uvd_clocks(rdev, 53300, 40000);

	r = uvd_v1_0_start(rdev);
	if (r)
		goto done;

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring);

done:
	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v1_0_fini - stop the hardware block
 *
 * @rdev: radeon_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
void uvd_v1_0_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];

	uvd_v1_0_stop(rdev);
	ring->ready = false;
}

/**
 * uvd_v1_0_start - start UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Setup and start the UVD block
 */
int uvd_v1_0_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t rb_bufsz;
	int i, j, r;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* disable clock gating */
	WREG32(UVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
	       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXA1, 0x0);
	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXB1, 0x0);
	WREG32(UVD_MPC_SET_ALU, 0);
	WREG32(UVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 1 << 9);

	/* enable UMC and NC0 */
	WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));

	/* boot up the VCPU */
	WREG32(UVD_SOFT_RESET, 0);
	mdelay(10);

	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
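
	/* wait for the VCPU to come up: poll UVD_STATUS for up to ~1s,
	 * resetting the VCPU and retrying up to 10 times */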
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(UVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(UVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(UVD_RBC_RB_RPTR, 0x0);

	ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v1_0_stop - stop UVD block
 *
 * @rdev: radeon_device pointer
 *
 * stop the UVD block
 */
void uvd_v1_0_stop(struct radeon_device *rdev)
{
	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
}

/**
 * uvd_v1_0_ring_test - register write test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully write to the context register
 */
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;
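
	/* seed the context ID register with a known value, then have the
	 * ring overwrite it and poll until the new value shows up */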
	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v1_0_semaphore_emit - emit semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
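
	/* the semaphore GPU address is passed in 8-byte units, split into
	 * 20-bit low and high halves */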
	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
	radeon_ring_write(ring, emit_wait ? 1 : 0);
}

/**
 * uvd_v1_0_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
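
	/* point the RBC at the indirect buffer and give its length in dwords */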
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v1_0_ib_test - test ib execution
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_set_uvd_clocks(rdev, 53300, 40000);
	if (r) {
		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
		return r;
	}
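
	/* submit a dummy create message, then a destroy message and wait on
	 * its fence */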
	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto error;
	}
	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);

error:
	radeon_fence_unref(&fence);
	radeon_set_uvd_clocks(rdev, 0, 0);
	return r;
}