radeon_ring.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

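/* Fetch one dword from an indirect buffer (IB) during command stream
 * parsing. Only two pages of the IB are kept kmapped at a time
 * (kpage[0]/kpage[1]); on a cache miss, radeon_cs_update_pages() maps the
 * requested page into one of the two slots and returns its index.
 */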
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

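/* Write one dword at the current write pointer. The ring size is a power
 * of two, so wrapping is a simple AND with ptr_mask. count_dw tracks the
 * dwords reserved by radeon_ring_alloc()/radeon_ring_lock().
 */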
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */

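/* The IB pool is a fixed array of RADEON_IB_POOL_SIZE indirect buffers
 * carved out of one GTT buffer by the sub-allocator (sa_manager). An IB
 * slot can be reused once the fence of its last submission has signaled.
 */
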
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have actually been emitted */
	if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

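/* Allocate an IB from the pool. The pool is scanned as a FIFO starting at
 * head_id (the likely oldest entry); if no slot is free we wait on the
 * oldest emitted fence and retry, giving up after 5 attempts.
 */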
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align requested size to 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* IBs are most likely allocated in a ring
				 * fashion, so head_id should point at the
				 * oldest IB.
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* This should be a rare event: all IBs are scheduled but none have
	 * signaled yet. Wait on the oldest emitted fence and retry.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		struct radeon_fence *fence = rdev->ib_pool.ibs[idx].fence;

		if (fence && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			r = radeon_fence_wait(fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

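/* Submit an IB to its ring: reserve ring space, have the ASIC-specific
 * code emit the IB-execute packet, then emit the IB's fence and commit.
 * 64 dwords is enough for the execute packet plus the fence.
 */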
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB; should we report it? */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}
	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

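/* Initialize the IB pool. The backing sub-allocator is set up in a local
 * variable first so the buffer allocation happens without holding the
 * pool mutex; if another thread won the race, the temporary manager is
 * simply torn down again.
 */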
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

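/* Run an IB test on every ring that claims to be ready. A failure on the
 * GFX ring is fatal for acceleration; a failure on any other ring only
 * disables that ring.
 */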
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */

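/* Map a ring pointer back to its index. r1xx-r5xx ASICs only have the
 * single CP (GFX) ring; Cayman and newer add the CP1 and CP2 rings.
 */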
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

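/* Recompute the free space between the GPU read pointer and our write
 * pointer. Because the dword count is a power of two, this reduces to a
 * masked subtraction. Illustrative example (hypothetical numbers): with
 * 1024 dwords, rptr = 100 and wptr = 200,
 * free = (100 + 1024 - 200) & 1023 = 924. A result of 0 means
 * rptr == wptr, i.e. the ring is empty, so free space is the full ring.
 */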
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align the requested size so that unlock_commit can pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	/* one dword is always left unused so that wptr never catches up
	 * with rptr (free == 0 is interpreted as an empty ring) */
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		/* drop the lock while waiting for the GPU to make progress */
		mutex_unlock(&rdev->ring_lock);
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		mutex_lock(&rdev->ring_lock);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

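/* Commit queued dwords to the GPU: pad the ring with NOPs up to the
 * fetch-size alignment, make sure the writes are visible, then update the
 * hardware write pointer. The read-back flushes the posted register write.
 */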
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

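/* Typical usage of the helpers above, as seen in radeon_ib_schedule()
 * (illustrative sketch, not a complete submission path):
 *
 *	r = radeon_ring_lock(rdev, ring, 64);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, ...);		// queue command dwords
 *	radeon_ring_unlock_commit(rdev, ring);	// or unlock_undo on error
 */
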
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	/* if the ring is idle, write a NOP so lockup detection has CP
	 * activity to observe */
	mutex_lock(&rdev->ring_lock);
	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
	mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * The lockup tracking information doesn't need to be initialized
 * explicitly: either the CP rptr will differ from the recorded value, or
 * jiffies will wrap around, and both cases force an update of the
 * tracking information.
 *
 * A lockup is only reported when the rptr has not moved for more than
 * radeon_lockup_timeout msec, so the caller must query several times
 * within that window for a lockup to be detected; the fencing code should
 * be cautious about that.
 *
 * The caller should also write to the ring to force the CP to do
 * something (see radeon_ring_force_activity()), so we don't get a false
 * positive just because the CP has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a jiffies wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lu msec\n", elapsed);
		return true;
	}
	/* give the GPU a chance ... */
	return false;
}

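/* One-time ring setup: record the register layout, allocate and pin the
 * ring buffer in GTT, and map it for CPU access. ring_size is in bytes
 * and must be a power of two, since ptr_mask is derived from the dword
 * count.
 */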
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */

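/* When CONFIG_DEBUG_FS is set, one file per ring (radeon_ring_gfx,
 * radeon_ring_cp1, radeon_ring_cp2) dumps the ring pointers and contents,
 * and one file per IB pool slot dumps that IB's dwords.
 */
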
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		int r;	/* int, not unsigned: error codes are negative */

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}