radeon_ring.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB.
 */
int radeon_debugfs_sa_init(struct radeon_device *rdev);
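
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @size: requested IB size
 *
 * Request an IB; IBs are allocated from the ring_tmp_bo suballocator.
 * Returns 0 on success, error on failure.
 */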
int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib *ib, unsigned size)
{
        int i, r;

        r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
        if (r) {
                dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                return r;
        }
        r = radeon_semaphore_create(rdev, &ib->semaphore);
        if (r) {
                return r;
        }
        ib->ring = ring;
        ib->fence = NULL;
        ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
        ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
        ib->vm_id = 0;
        ib->is_const_ib = false;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                ib->sync_to[i] = NULL;
        return 0;
}
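
/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB: release the semaphore, the suballocation, and the
 * fence reference.
 */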
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
        radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
        radeon_fence_unref(&ib->fence);
}
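
/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: const IB to schedule ahead of @ib (SI only), or NULL
 *
 * Schedule an IB on the associated ring, emitting any semaphore waits
 * needed to synchronize with other rings and a fence to track completion.
 * On SI the optional const IB is executed first so the Constant Engine
 * can prime caches while the Drawing Engine updates register state.
 * Returns 0 on success, error on failure.
 */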
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                       struct radeon_ib *const_ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        bool need_sync = false;
        int i, r = 0;

        if (!ib->length_dw || !ring->ready) {
                /* TODO: Nothing in the IB we should report. */
                dev_err(rdev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        /* 64 dwords should be enough for fence too */
        r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
        if (r) {
                dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_fence *fence = ib->sync_to[i];
                if (radeon_fence_need_sync(fence, ib->ring)) {
                        need_sync = true;
                        radeon_semaphore_sync_rings(rdev, ib->semaphore,
                                                    fence->ring, ib->ring);
                        radeon_fence_note_sync(fence, ib->ring);
                }
        }
        /* immediately free semaphore when we don't need to sync */
        if (!need_sync) {
                radeon_semaphore_free(rdev, &ib->semaphore, NULL);
        }
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
                radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
        }
        radeon_ring_ib_execute(rdev, ib->ring, ib);
        r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
        if (r) {
                dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }
        if (const_ib) {
                const_ib->fence = radeon_fence_ref(ib->fence);
        }
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}
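
/**
 * radeon_ib_pool_init - initialize the suballocator used for IBs
 *
 * @rdev: radeon_device pointer
 *
 * Initialize and start the suballocator backing the IB pool and
 * register its debugfs file.
 * Returns 0 on success, error on failure.
 */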
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->ib_pool_ready) {
                return 0;
        }
        r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
                return r;
        }
        r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
        if (r) {
                return r;
        }
        rdev->ib_pool_ready = true;
        if (radeon_debugfs_sa_init(rdev)) {
                dev_err(rdev->dev, "failed to register debugfs file for SA\n");
        }
        return 0;
}
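
/**
 * radeon_ib_pool_fini - tear down the suballocator used for IBs
 *
 * @rdev: radeon_device pointer
 *
 * Suspend and tear down the suballocator backing the IB pool.
 */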
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        if (rdev->ib_pool_ready) {
                radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
                radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
                rdev->ib_pool_ready = false;
        }
}
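
/**
 * radeon_ib_ring_tests - test an IB on each ready ring
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB on each ring that is marked ready. A ring that fails the
 * test is marked not ready; a failure on the GFX ring additionally
 * disables acceleration.
 * Returns 0 on success, error if the GFX ring test fails.
 */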
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ring = &rdev->ring[i];

                if (!ring->ready)
                        continue;

                r = radeon_ib_test(rdev, i, ring);
                if (r) {
                        ring->ready = false;
                        if (i == RADEON_RING_TYPE_GFX_INDEX) {
                                /* oh, oh, that's really bad */
                                DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
                                rdev->accel_working = false;
                                return r;
                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
                        }
                }
        }
        return 0;
}

/*
 * Ring.
 */
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
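
/**
 * radeon_ring_write - write a dword to the ring buffer
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword to write
 *
 * Write one dword at the current write pointer and advance it,
 * wrapping around the end of the ring buffer as needed.
 */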
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}
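
/*
 * A minimal usage sketch (not taken from this file): callers bracket
 * radeon_ring_write() calls between lock and commit.  PACKET0, reg and
 * value below are placeholders for whatever the caller actually emits.
 *
 *      r = radeon_ring_lock(rdev, ring, 2);
 *      if (r)
 *              return r;
 *      radeon_ring_write(ring, PACKET0(reg, 0));
 *      radeon_ring_write(ring, value);
 *      radeon_ring_unlock_commit(rdev, ring);
 */

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Returns true if the ring supports writing to scratch registers,
 * false if not.
 */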
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
                                      struct radeon_ring *ring)
{
        switch (ring->idx) {
        case RADEON_RING_TYPE_GFX_INDEX:
        case CAYMAN_RING_TYPE_CP1_INDEX:
        case CAYMAN_RING_TYPE_CP2_INDEX:
                return true;
        default:
                return false;
        }
}
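
/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Re-read the read pointer and update the amount of free space
 * in the ring buffer.
 */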
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 rptr;

        if (rdev->wb.enabled)
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        else
                rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                ring->ring_free_dw = ring->ring_size / 4;
        }
}
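
/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer, waiting for fences to
 * retire ring space if necessary.
 * Returns 0 on success, error on failure.
 */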
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next_locked(rdev, ring->idx);
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}
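
/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer.
 * Returns 0 on success, error on failure.
 */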
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->ring_lock);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&rdev->ring_lock);
                return r;
        }
        return 0;
}
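
/**
 * radeon_ring_commit - tell the GPU to execute the new commands on the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Pad the ring with nops up to the fetch alignment, then update the
 * wptr register to tell the GPU to execute the new commands.
 */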
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        /* We pad to match fetch size */
        while (ring->wptr & ring->align_mask) {
                radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();
        WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
        (void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_commit(rdev, ring);
        mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_undo(struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_undo(ring);
        mutex_unlock(&rdev->ring_lock);
}
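
/**
 * radeon_ring_force_activity - add a nop packet to an idle ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * If the ring is otherwise idle, emit a single nop so that lockup
 * detection sees the rptr move when the GPU is healthy.
 */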
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;

        radeon_ring_free_size(rdev, ring);
        if (ring->rptr == ring->wptr) {
                r = radeon_ring_alloc(rdev, ring, 1);
                if (!r) {
                        radeon_ring_write(ring, ring->nop);
                        radeon_ring_commit(rdev, ring);
                }
        }
}

void radeon_ring_lockup_update(struct radeon_ring *ring)
{
        ring->last_rptr = ring->rptr;
        ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 *
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * The lockup tracking information doesn't need explicit initialization:
 * either the CP rptr will differ from the saved value, or a jiffies wrap
 * around will occur, and either case forces the tracking information to
 * be (re)initialized.
 *
 * A false positive is possible if we are called again after a long delay
 * while last_rptr still equals the current CP rptr; unlikely, but it can
 * happen. To avoid it, we return false and update the tracking information
 * whenever the time elapsed since the last call exceeds the lockup timeout.
 * As a consequence, the caller must call radeon_ring_test_lockup() several
 * times within the timeout window before a lockup is reported; the fencing
 * code should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so
 * we don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        unsigned long cjiffies, elapsed;
        uint32_t rptr;

        cjiffies = jiffies;
        if (!time_after(cjiffies, ring->last_activity)) {
                /* likely a wrap around */
                radeon_ring_lockup_update(ring);
                return false;
        }
        rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        if (ring->rptr != ring->last_rptr) {
                /* CP is still working, no lockup */
                radeon_ring_lockup_update(ring);
                return false;
        }
        elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
        if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
                dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
        return false;
}

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: receives the saved commands, or NULL if nothing was saved
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
                            uint32_t **data)
{
        unsigned size, ptr, i;

        /* just in case lock the ring */
        mutex_lock(&rdev->ring_lock);
        *data = NULL;

        if (ring->ring_obj == NULL) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* it doesn't make sense to save anything if all fences are signaled */
        if (!radeon_fence_count_emitted(rdev, ring->idx)) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* calculate the number of dw on the ring */
        if (ring->rptr_save_reg)
                ptr = RREG32(ring->rptr_save_reg);
        else if (rdev->wb.enabled)
                ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
        else {
                /* no way to read back the next rptr */
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        size = ring->wptr + (ring->ring_size / 4);
        size -= ptr;
        size &= ring->ptr_mask;
        if (size == 0) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }

        /* and then save the content of the ring */
        *data = kmalloc(size * 4, GFP_KERNEL);
        if (!*data) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
        }
        for (i = 0; i < size; ++i) {
                (*data)[i] = ring->ring[ptr++];
                ptr &= ring->ptr_mask;
        }

        mutex_unlock(&rdev->ring_lock);
        return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                        unsigned size, uint32_t *data)
{
        int i, r;

        if (!size || !data)
                return 0;

        /* restore the saved ring content */
        r = radeon_ring_lock(rdev, ring, size);
        if (r)
                return r;

        for (i = 0; i < size; ++i) {
                radeon_ring_write(ring, data[i]);
        }

        radeon_ring_unlock_commit(rdev, ring);
        kfree(data);
        return 0;
}
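
/**
 * radeon_ring_init - init a driver ring struct and allocate the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring buffer in bytes
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring, allocating,
 * pinning and mapping the ring buffer object.
 * Returns 0 on success, error on failure.
 */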
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
                     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->rptr_reg = rptr_reg;
        ring->wptr_reg = wptr_reg;
        ring->ptr_reg_shift = ptr_reg_shift;
        ring->ptr_reg_mask = ptr_reg_mask;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
        if (rdev->wb.enabled) {
                u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
                ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
                ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
        }
        if (radeon_debugfs_ring_init(rdev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings!\n");
        }
        return 0;
}
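
/**
 * radeon_ring_fini - tear down the driver ring struct
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring: unmap,
 * unpin and free the ring buffer object.
 */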
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&rdev->ring_lock);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;
        seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
        seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
        if (ring->rptr_save_reg) {
                seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
                           RREG32(ring->rptr_save_reg));
        }
        seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        i = ring->rptr;
        for (j = 0; j <= count; j++) {
                seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
        return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
                struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
                int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
                unsigned r;

                if (&rdev->ring[ridx] != ring)
                        continue;

                r = radeon_debugfs_add_files(rdev, info, 1);
                if (r)
                        return r;
        }
#endif
        return 0;
}

int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
        return 0;
#endif
}