i915_gem_debug.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */
  27. #include "drmP.h"
  28. #include "drm.h"
  29. #include "i915_drm.h"
  30. #include "i915_drv.h"

#if WATCH_LISTS
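/*
 * Walk the GEM bookkeeping lists and check that every object on them is
 * still referenced, belongs to this device, and is in a state consistent
 * with the list it sits on (active, flushing, gpu_write, inactive, pinned).
 * Returns the number of inconsistencies found; once a non-zero result has
 * been reported, subsequent calls return 0 so the log is not flooded.
 */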
int
i915_verify_lists(struct drm_device *dev)
{
        static int warned;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int err = 0;

        if (warned)
                return 0;

        /* Render active objects must be alive, marked active and readable
         * by at least one GPU domain; if the GPU still has writes pending
         * they must also be on the gpu_write list.
         */
        list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed render active %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid render active %p (a %d r %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.read_domains);
                        err++;
                } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
                                  obj,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

        /* Flushing objects are active and have an outstanding GPU write. */
        list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed flushing %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
                           list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

        /* Everything on the gpu_write list must be active with a GPU
         * write domain pending.
         */
        list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed gpu write %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        /* Inactive objects must be idle: not pinned, not active, and with
         * no GPU write domain outstanding.
         */
        list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
                        err++;
                        break;
                } else if (obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        /* Pinned objects must hold a pin reference but otherwise be idle. */
        list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed pinned %p\n", obj);
                        err++;
                        break;
                } else if (!obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        /* Remember that we complained, so each failure is reported once. */
        return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_EXEC || WATCH_PWRITE
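/*
 * Hex-dump part of one backing page, one 32-bit word per line, flagging
 * the word at 'mark' with a trail of asterisks.  'bias' is the object's
 * GTT offset for this page, so the printed addresses match what the GPU
 * sees.
 */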
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
                   uint32_t bias, uint32_t mark)
{
        uint32_t *mem = kmap_atomic(page, KM_USER0);
        int i;

        for (i = start; i < end; i += 4)
                DRM_INFO("%08x: %08x%s\n",
                         (int) (bias + i), mem[i / 4],
                         (bias + i == mark) ? " ********" : "");
        kunmap_atomic(mem, KM_USER0);
        /* give syslog time to catch up */
        msleep(1);
}
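
/*
 * Dump the first 'len' bytes of an object's backing store in 128-byte
 * chunks, pausing after each chunk so syslog can keep up.
 */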
void
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                     const char *where, uint32_t mark)
{
        int page;

        DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
        for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
                int page_len, chunk, chunk_len;

                page_len = len - page * PAGE_SIZE;
                if (page_len > PAGE_SIZE)
                        page_len = PAGE_SIZE;

                for (chunk = 0; chunk < page_len; chunk += 128) {
                        chunk_len = page_len - chunk;
                        if (chunk_len > 128)
                                chunk_len = 128;
                        i915_gem_dump_page(obj->pages[page],
                                           chunk, chunk + chunk_len,
                                           obj->gtt_offset +
                                           page * PAGE_SIZE,
                                           mark);
                }
        }
}
#endif /* WATCH_EXEC || WATCH_PWRITE */

#if WATCH_COHERENCY
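/*
 * Compare every word of the object's CPU-side backing pages against the
 * same word read back through the GTT mapping, reporting up to nine
 * mismatches before giving up.  Useful for spotting cache-coherency bugs
 * between the CPU and GPU views of an object.
 */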
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
        struct drm_device *dev = obj->base.dev;
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
        int bad_count = 0;

        DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
                 __func__, obj, obj->gtt_offset, handle,
                 obj->base.size / 1024);

        gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
        }

        for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
                int i;

                backing_map = kmap_atomic(obj->pages[page], KM_USER0);
                if (backing_map == NULL) {
                        DRM_ERROR("failed to map backing page\n");
                        goto out;
                }

                for (i = 0; i < PAGE_SIZE / 4; i++) {
                        uint32_t cpuval = backing_map[i];
                        uint32_t gttval = readl(gtt_mapping +
                                                page * 1024 + i);

                        if (cpuval != gttval) {
                                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                                         "0x%08x vs 0x%08x\n",
                                         (int)(obj->gtt_offset +
                                               page * PAGE_SIZE + i * 4),
                                         cpuval, gttval);
                                if (bad_count++ >= 8) {
                                        DRM_INFO("...\n");
                                        goto out;
                                }
                        }
                }
                kunmap_atomic(backing_map, KM_USER0);
                backing_map = NULL;
        }

out:
        if (backing_map != NULL)
                kunmap_atomic(backing_map, KM_USER0);
        iounmap(gtt_mapping);

        /* give syslog time to catch up */
        msleep(1);

        /* Directly flush the object, since we just loaded values with the CPU
         * from the backing pages and we don't want to disturb the cache
         * management that we're trying to observe.
         */
        i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */