i915_debugfs.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

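/* i915_capabilities: print the hardware generation and the per-device
 * feature flags from intel_device_info.
 */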
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
#undef B

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

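/* Emit a one-line summary of a GEM object: pin/tiling flags, size,
 * read/write domains, seqnos, cache level and where it currently lives.
 */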
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
		&obj->base,
		get_pin_flag(obj),
		get_tiling_flag(obj),
		obj->base.size,
		obj->base.read_domains,
		obj->base.write_domain,
		obj->last_rendering_seqno,
		obj->last_fenced_seqno,
		cache_level_str(obj->cache_level),
		obj->dirty ? " dirty" : "",
		obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

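/* Dump every object on one of the GEM memory-management lists (active,
 * flushing, inactive, pinned or deferred-free), selected via info_ent->data.
 */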
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		seq_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		seq_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		count, total_obj_size, total_gtt_size);
	return 0;
}

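/* count_objects() accumulates into the local obj/size/count/mappable_size/
 * mappable_count variables of its caller, i915_gem_object_info() below.
 */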
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while(0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		dev_priv->mm.object_count,
		dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
		count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
		count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		count, total_obj_size, total_gtt_size);

	return 0;
}

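/* Report the page-flip state of every CRTC: whether a flip is queued,
 * pending on vblank, or not outstanding at all.
 */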
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				&dev_priv->ring[RCS].request_list,
				list) {
			seq_printf(m, " %d @ %d\n",
				gem_request->seqno,
				(int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				&dev_priv->ring[VCS].request_list,
				list) {
			seq_printf(m, " %d @ %d\n",
				gem_request->seqno,
				(int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				&dev_priv->ring[BCS].request_list,
				list) {
			seq_printf(m, " %d @ %d\n",
				gem_request->seqno,
				(int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

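/* Print the current, waiter and IRQ seqnos for a single ring. */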
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiter sequence (%s): %d\n",
			ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s): %d\n",
			ring->name, ring->irq_seqno);
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				pipe_name(pipe),
				I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				dev_priv->ring[i].name,
				I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			i * 4,
			hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

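/* Hexdump an object's contents through a write-combining mapping of the
 * GTT aperture.
 */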
static void i915_dump_object(struct seq_file *m,
			     struct io_mapping *mapping,
			     struct drm_i915_gem_object *obj)
{
	int page, page_count, i;

	page_count = obj->base.size / PAGE_SIZE;
	for (page = 0; page < page_count; page++) {
		u32 *mem = io_mapping_map_wc(mapping,
					     obj->gtt_offset + page * PAGE_SIZE);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
		io_mapping_unmap(mem);
	}
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (!ring->obj) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		const u8 __iomem *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x : %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (ring->size == 0)
		return 0;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, " Size : %08x\n", ring->size);
	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev)) {
		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
	}
	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RING_RENDER: return " render";
	case RING_BSD: return " bsd";
	case RING_BLT: return " blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

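/* Print one line per buffer object recorded in a captured error state. */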
static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
			err->gtt_offset,
			err->size,
			err->read_domains,
			err->write_domain,
			err->seqno,
			pin_flag(err->pinned),
			tiling_flag(err->tiling),
			dirty_flag(err->dirty),
			purgeable_flag(err->purgeable),
			ring_str(err->ring),
			cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

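/* Dump the most recently captured GPU error state: error registers, fence
 * registers, and the contents of the captured batch and ring buffers.
 */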
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "Blitter command stream:\n");
		seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
		seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
		seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
		seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
		seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
		seq_printf(m, "Video (BSD) command stream:\n");
		seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
		seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
		seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
		seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
		seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
	}
	seq_printf(m, "Render command stream:\n");
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
	}
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, " seqno: 0x%08x\n", error->seqno);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (error->active_bo)
		print_error_buffers(m, "Active",
			error->active_bo,
			error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
			error->pinned_bo,
			error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				dev_priv->ring[i].name,
				obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
		if (error->ringbuffer[i]) {
			struct drm_i915_error_object *obj = error->ringbuffer[i];

			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				dev_priv->ring[i].name,
				obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						offset,
						obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay = I915_READ16(CRSTANDVID);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			(gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
			GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u32 rstdbyctl = I915_READ(RSTDBYCTL);
	u16 crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		"yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		(rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		(rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

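/* Report whether panel self-refresh is currently enabled, using the
 * self-refresh bit appropriate for this platform.
 */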
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		fb->base.width,
		fb->base.height,
		fb->base.depth,
		fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			fb->base.width,
			fb->base.height,
			fb->base.depth,
			fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	seq_printf(m, "forcewake count = %d\n",
		atomic_read(&dev_priv->forcewake_count));

	return 0;
}

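/* i915_wedged: reading shows the current wedged status; writing a value
 * passes it to i915_handle_error() to simulate a GPU error.
 */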
static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};

static int
i915_max_freq_open(struct inode *inode,
		   struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "max freq: %d\n", dev_priv->max_delay * 50);

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = i915_max_freq_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};

static int
i915_cache_sharing_open(struct inode *inode,
			struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof (buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = i915_cache_sharing_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release.
 */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;
	list_add(&node->list, &minor->debugfs_nodes.list);

	return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_GEN6(dev))
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN6(dev))
		return 0;

	/*
	 * It's bad that we can potentially hang userspace if struct_mutex gets
	 * forever stuck. However, if we cannot acquire this lock it means that
	 * almost certainly the driver has hung and is not unload-able.
	 * Therefore hanging here is probably a minor inconvenience that almost
	 * no user will ever see.
	 */
	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_max_freq",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_max_freq_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
}

static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_cache_sharing",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_cache_sharing_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
}

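/* Read-only debugfs entries; the writable files (i915_wedged, i915_max_freq,
 * i915_cache_sharing, i915_forcewake_user) are created separately above and
 * registered from i915_debugfs_init().
 */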
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_wedged_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_max_freq_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */