i915_gem_debug.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#if WATCH_LISTS
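/*
 * i915_verify_lists - sanity-check the GEM memory-management lists
 *
 * Walk the render ring active list, the flushing list, the gpu_write_list
 * and the inactive list, and complain about any object whose state is
 * inconsistent with the list it sits on (freed, wrong domains, wrong
 * active/pinned flags).  The error count is latched in 'warned' so the
 * check only reports once.
 */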
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	if (warned)
		return 0;

	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_COHERENCY
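/*
 * i915_gem_object_check_coherency - compare the CPU and GTT views of an object
 *
 * Map the object's range of the GTT aperture and compare every 32-bit word
 * against the corresponding word in the CPU backing pages, logging each
 * mismatch.  Reporting stops after a handful of bad words to avoid flooding
 * the log.
 */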
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
		 __func__, obj, obj->gtt_offset, handle,
		 obj->base.size / 1024);

	gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
			      obj->base.size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}
	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj->pages[page]);
		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map);
		backing_map = NULL;
	}
out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */
	i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */