/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *pm4fw, *pfpfw;
	uint32_t gmem;
};

#define ANY_ID 0xff
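
/*
 * ANY_ID is a wildcard: a gpulist entry field set to ANY_ID matches any
 * value in the corresponding field of the probed revision (see
 * _rev_match() below).  E.g. the A320 entry, ADRENO_REV(3, 2, ANY_ID,
 * ANY_ID), matches a 3.2.0.0 and a 3.2.1.0 alike.
 */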

static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_256K,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_512K,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, 0),
		.revn  = 330,
		.name  = "A330",
		.pm4fw = "a330_pm4.fw",
		.pfpfw = "a330_pfp.fw",
		.gmem  = SZ_1M,
	},
};

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->info->gmem;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}

#define rbmemptr(adreno_gpu, member)  \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
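
/*
 * Note: adreno_gpu->memptrs points at a small uncached buffer (allocated
 * in adreno_gpu_init() below) shared with the CP: the GPU writes the
 * current ringbuffer read-pointer and the last completed fence seqno
 * back into it.  rbmemptr() gives the GPU (iova) address of a single
 * member, which is what gets programmed into the hw in adreno_hw_init().
 */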

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	DBG("%s", gpu->name);

	/* Setup REG_CP_RB_CNTL: */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
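	/*
	 * Worked example: with RB_SIZE = SZ_32K (the ringbuffer size passed
	 * to msm_gpu_init() below), the ring holds 32768 / 8 == 4096
	 * quad-words, so BUFSZ == ilog2(4096) == 12.
	 */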

	/* Setup ringbuffer address: */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

	return 0;
}

/* ring->start/cur are dword pointers, so this is an offset in dwords: */
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr  = 0;
	adreno_gpu->memptrs->wptr  = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch,
			 * otherwise emit it like a normal cmd buf:
			 */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);
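	/*
	 * (the CACHE_FLUSH_TS event above is what writes submit->fence back
	 * into memptrs->fence once the cmds ahead of it have landed, i.e.
	 * the same location adreno_last_fence() reads to report completion)
	 */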

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);

	return 0;
}

void adreno_flush(struct msm_gpu *gpu)
{
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}

void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t rptr, wptr = get_wptr(gpu->rb);
	unsigned long t;

	t = jiffies + ADRENO_IDLE_TIMEOUT;

	/* then wait for CP to drain ringbuffer: */
	do {
		rptr = adreno_gpu->memptrs->rptr;
		if (rptr == wptr)
			return;
	} while (time_before(jiffies, t));

	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
}
#endif

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t freedwords;
	unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;

	do {
		uint32_t size = gpu->rb->size / 4;
		uint32_t wptr = get_wptr(gpu->rb);
		uint32_t rptr = adreno_gpu->memptrs->rptr;
		freedwords = (rptr + (size - 1) - wptr) % size;
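		/*
		 * The (size - 1) keeps one dword reserved so that wptr ==
		 * rptr always means an empty (never a completely full)
		 * ring.  E.g. with size = 8192, rptr = 100 and wptr = 8000:
		 * freedwords = (100 + 8191 - 8000) % 8192 == 291.
		 */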

		if (time_after(jiffies, t)) {
			DRM_ERROR("%s: timeout waiting for ringbuffer space\n",
					gpu->name);
			break;
		}
	} while (freedwords < ndwords);
}

static const char *iommu_ports[] = {
	"gfx3d_user", "gfx3d_priv",
	"gfx3d1_user", "gfx3d1_priv",
};

static inline bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		struct adreno_rev rev)
{
	int i, ret;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (_rev_match(info->rev.core, rev.core) &&
				_rev_match(info->rev.major, rev.major) &&
				_rev_match(info->rev.minor, rev.minor) &&
				_rev_match(info->rev.patchid, rev.patchid)) {
			gpu->info = info;
			gpu->revn = info->revn;
			break;
		}
	}

	if (i == ARRAY_SIZE(gpulist)) {
		dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
				rev.core, rev.major, rev.minor, rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
			rev.core, rev.major, rev.minor, rev.patchid);

	gpu->funcs = funcs;
	gpu->rev = rev;

	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				gpu->info->pfpfw, ret);
		return ret;
	}

	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
			gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = msm_iommu_attach(drm, gpu->base.iommu,
			iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ret;

	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
			MSM_BO_UNCACHED);
	if (IS_ERR(gpu->memptrs_bo)) {
		ret = PTR_ERR(gpu->memptrs_bo);
		gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
	if (!gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
			&gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference(gpu->memptrs_bo);
	}
	if (gpu->pm4)
		release_firmware(gpu->pm4);
	if (gpu->pfp)
		release_firmware(gpu->pfp);

	msm_gpu_cleanup(&gpu->base);
}