i915_debugfs.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

#define ACTIVE_LIST 1
#define FLUSHING_LIST 2
#define INACTIVE_LIST 3

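/*
 * Single-character status flags used in the object dumps below:
 * get_pin_flag() returns "P" for an object pinned by userspace, "p" for an
 * object pinned only by the kernel, and " " otherwise; get_tiling_flag()
 * returns "X"/"Y" for the tiling mode or " " for an untiled object.
 */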
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

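/*
 * i915_gem_active / i915_gem_flushing / i915_gem_inactive: dump one of the
 * GEM object lists, selected through the drm_info_list data pointer.  Only
 * the active list is protected by its own spinlock here; the flushing and
 * inactive lists are walked without taking a lock.
 */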
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        spinlock_t *lock = NULL;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                lock = &dev_priv->mm.active_list_lock;
                head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        default:
                DRM_INFO("Ooops, unexpected list\n");
                return 0;
        }

        if (lock)
                spin_lock(lock);
        list_for_each_entry(obj_priv, head, list)
        {
                struct drm_gem_object *obj = obj_priv->obj;

                seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
                           obj,
                           get_pin_flag(obj_priv),
                           obj->size,
                           obj->read_domains, obj->write_domain,
                           obj_priv->last_rendering_seqno,
                           obj_priv->dirty ? "dirty" : "");

                if (obj->name)
                        seq_printf(m, " (name: %d)", obj->name);
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                        seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
                if (obj_priv->gtt_space != NULL)
                        seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
                seq_printf(m, "\n");
        }

        if (lock)
                spin_unlock(lock);
        return 0;
}

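/*
 * i915_gem_request: list the outstanding GEM requests, one line per request
 * with its seqno and its age in jiffies.
 */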
static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;

        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
                seq_printf(m, " %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
        return 0;
}

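/*
 * i915_gem_seqno: report the sequence number most recently completed by the
 * hardware (read via the status page), plus the seqno the driver is waiting
 * for and the last seqno seen by the interrupt handler.
 */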
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->hw_status_page != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           i915_get_gem_seqno(dev));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence: %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
        return 0;
}

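/*
 * i915_gem_interrupt: dump the interrupt enable/identity/mask registers.
 * Ironlake splits these across the north display, south display and
 * graphics blocks, so it gets a different register set than earlier chips.
 */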
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!IS_IRONLAKE(dev)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat: %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat: %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->hw_status_page != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           i915_get_gem_seqno(dev));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence: %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence: %d\n",
                   dev_priv->mm.irq_gem_seqno);
        return 0;
}

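/*
 * i915_gem_fence_regs: show how many fence registers are reserved for the
 * kernel and, for each register, which object (if any) currently owns it.
 */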
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                if (obj == NULL) {
                        seq_printf(m, "Fenced object[%2d] = unused\n", i);
                } else {
                        struct drm_i915_gem_object *obj_priv;

                        obj_priv = obj->driver_private;
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
                                   obj_priv->gtt_offset,
                                   obj->size, obj_priv->stride,
                                   get_tiling_flag(obj_priv),
                                   obj->read_domains, obj->write_domain,
                                   obj_priv->last_rendering_seqno);
                        if (obj->name)
                                seq_printf(m, " (name: %d)", obj->name);
                        seq_printf(m, "\n");
                }
        }
        return 0;
}

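/*
 * i915_gem_hws: hex dump of the start of the hardware status page.  Note
 * that the loop bound (4096 / sizeof(u32) / 4) only covers the first
 * quarter of the 4 KiB page.
 */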
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->hw_status_page;
        if (hws == NULL)
                return 0;
        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

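/*
 * Helper for i915_batchbuffer_info: map each backing page in turn with
 * kmap_atomic() and dump its contents as 32-bit words.
 */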
static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
{
        int page, i;
        uint32_t *mem;

        for (page = 0; page < page_count; page++) {
                mem = kmap_atomic(pages[page], KM_USER0);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
                /* kunmap_atomic() takes the mapped address, not the page */
                kunmap_atomic(mem, KM_USER0);
        }
}

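/*
 * i915_batchbuffers: walk the active list and dump the contents of every
 * object that is still being read as a command buffer
 * (I915_GEM_DOMAIN_COMMAND).  Note that the entire dump runs under the
 * active-list spinlock.
 */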
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        spin_lock(&dev_priv->mm.active_list_lock);

        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
                obj = obj_priv->obj;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        ret = i915_gem_object_get_pages(obj);
                        if (ret) {
                                DRM_ERROR("Failed to get pages: %d\n", ret);
                                spin_unlock(&dev_priv->mm.active_list_lock);
                                return ret;
                        }

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
                        i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

                        i915_gem_object_put_pages(obj);
                }
        }

        spin_unlock(&dev_priv->mm.active_list_lock);
        return 0;
}

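/*
 * i915_ringbuffer_data: dump the ring buffer contents word by word through
 * its CPU mapping; i915_ringbuffer_info below reports the head, tail and
 * ACTHD registers instead.
 */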
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u8 *virt;
        uint32_t *ptr, off;

        if (!dev_priv->ring.ring_obj) {
                seq_printf(m, "No ringbuffer setup\n");
                return 0;
        }

        virt = dev_priv->ring.virtual_start;

        for (off = 0; off < dev_priv->ring.Size; off += 4) {
                ptr = (uint32_t *)(virt + off);
                seq_printf(m, "%08x : %08x\n", off, *ptr);
        }
        return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int head, tail;

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

        seq_printf(m, "RingHead : %08x\n", head);
        seq_printf(m, "RingTail : %08x\n", tail);
        seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
        seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
        return 0;
}

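/*
 * i915_error_state: print the register snapshot (EIR, PGTBL_ER, IPEIR/IPEHR,
 * INSTDONE, ACTHD, plus the 965-only INSTPS/INSTDONE1) recorded when a GPU
 * error was detected, or a short note if no error state has been collected.
 */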
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
        if (IS_I965G(dev)) {
                seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
        }

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

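/*
 * i915_regs: raw dump of the listed MMIO register ranges (VGA, interrupt
 * control, fences, clock control, display, overlay, ...), one register per
 * line as "offset<TAB>value".
 */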
static int i915_registers_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t reg;

#define DUMP_RANGE(start, end) \
        for (reg = start; reg < end; reg += 4) \
                seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));

        DUMP_RANGE(0x00000, 0x00fff);   /* VGA registers */
        DUMP_RANGE(0x02000, 0x02fff);   /* instruction, memory, interrupt control registers */
        DUMP_RANGE(0x03000, 0x031ff);   /* FENCE and PPGTT control registers */
        DUMP_RANGE(0x03200, 0x03fff);   /* frame buffer compression registers */
        DUMP_RANGE(0x05000, 0x05fff);   /* I/O control registers */
        DUMP_RANGE(0x06000, 0x06fff);   /* clock control registers */
        DUMP_RANGE(0x07000, 0x07fff);   /* 3D internal debug registers */
        DUMP_RANGE(0x07400, 0x088ff);   /* GPE debug registers */
        DUMP_RANGE(0x0a000, 0x0afff);   /* display palette registers */
        DUMP_RANGE(0x10000, 0x13fff);   /* MMIO MCHBAR */
        DUMP_RANGE(0x30000, 0x3ffff);   /* overlay registers */
        DUMP_RANGE(0x60000, 0x6ffff);   /* display engine pipeline registers */
        DUMP_RANGE(0x70000, 0x72fff);   /* display and cursor registers */
        DUMP_RANGE(0x73000, 0x73fff);   /* performance counters */

        return 0;
}

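/*
 * i915_wedged: read/write file exposing dev_priv->mm.wedged.  Reading
 * reports the current value; writing a non-zero value fakes a GPU hang by
 * marking the GPU wedged, waking anyone sleeping on the IRQ queue and
 * queueing the error work handler.  With debugfs mounted in the usual
 * location, usage would look roughly like this (the exact path depends on
 * where debugfs is mounted and on the DRM minor number):
 *
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */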
static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof (buf),
                       "wedged : %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                DRM_WAKEUP(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

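/*
 * Create the i915_wedged debugfs file and register a fake info node for it,
 * keyed on its file_operations, so i915_debugfs_cleanup() can remove it
 * through drm_debugfs_remove_files() later.
 */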
static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

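/* Read-only debugfs files handled through the drm_info_list machinery. */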
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_regs", i915_registers_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

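/*
 * Called when the DRM core sets up the minor's debugfs directory: register
 * the special i915_wedged file first, then the read-only info files above.
 */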
int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

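/*
 * Remove the info files and the i915_wedged entry; the latter is removed via
 * the fake info node that was keyed on i915_wedged_fops.
 */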
void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */