i915_debugfs.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
	B(has_llc);
#undef B

	return 0;
}
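
/*
 * Illustrative note (not part of the original file): the report built by
 * i915_capabilities() is exposed through debugfs, so it can be read from
 * userspace once debugfs is mounted, e.g. (the path assumes the usual
 * debugfs mount point and card 0; the values shown are invented):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_capabilities
 *	gen: 6
 *	pch: 1
 *	is_mobile: yes
 *	...
 *
 * Each line comes from one B(x) invocation, which stringifies the field
 * name and prints yes/no for its value.
 */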

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
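
/*
 * Illustrative note (not part of the original file): a describe_obj()
 * line follows the format string above, i.e.
 *
 *	<address>: <pin flag><tiling flag> <size>KiB <read domains>
 *	<write domain> <last read/write/fenced seqnos><cache level>
 *	[ dirty][ purgeable] plus optional (name:)/(fence:)/(gtt ...)
 *	/(mappable)/(ring) suffixes.
 *
 * So a line such as (all values invented for illustration):
 *
 *	ffff88007c2d8000: pX    16KiB 0002 0002 37 37 0 uncached dirty (render ring)
 *
 * reads as: pinned in the kernel ('p'; 'P' would mean pinned by
 * userspace), X-tiled, 16 KiB, dirty, last used on the render ring.
 */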

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)
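
/*
 * Illustrative note (not part of the original file): count_objects()
 * folds one list walk into four tallies declared by its caller.  The
 * invocation
 *
 *	count_objects(&dev_priv->mm.active_list, mm_list);
 *
 * expands to, roughly:
 *
 *	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 *		size += obj->gtt_space->size;
 *		++count;
 *		if (obj->map_and_fenceable) {
 *			mappable_size += obj->gtt_space->size;
 *			++mappable_count;
 *		}
 *	}
 *
 * It relies on obj, size, count, mappable_size and mappable_count being
 * in scope at the call site, which i915_gem_object_info() below provides.
 */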

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);
	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
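
/*
 * Illustrative note (not part of the original file): the loop above
 * prints four 32-bit words per line with the bound 4096 / sizeof(u32) / 4
 * = 256, i.e. 64 lines of 4 dwords, so only the first 1 KiB of the 4 KiB
 * hardware status page is dumped.
 */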

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
	}
	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, " SYNC_0: 0x%08x\n",
			   error->semaphore_mboxes[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x\n",
			   error->semaphore_mboxes[ring][1]);
	}
	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

static int i915_error_state(struct seq_file *m, void *unused)
{
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		seq_printf(m, "no error state collected\n");
		return 0;
	}

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "IER: 0x%08x\n", error->ier);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	mutex_lock(&dev->struct_mutex);
	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error_priv->error = dev_priv->first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return single_open(file, i915_error_state, error_priv);
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
	kfree(error_priv);

	return single_release(inode, file);
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = seq_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
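
/*
 * Illustrative note (not part of the original file): with these fops the
 * error state is both readable and writable.  Reading dumps the last
 * captured GPU error; writing anything clears it, since the write handler
 * ignores the buffer contents and simply calls i915_destroy_error_state().
 * E.g. (paths assume the usual debugfs mount point and card 0):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_error_state
 *	# echo > /sys/kernel/debug/dri/0/i915_error_state
 */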

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
			   GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}
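
/*
 * Illustrative note (not part of the original file): on GEN6/GEN7 the
 * ratios decoded above are in units of 50 MHz, so e.g. a CAGF field of 22
 * reports as 22 * 50 = 1100 MHz, and the RPN/RP1/RP0 limits read from
 * GEN6_RP_STATE_CAP are scaled the same way.
 */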

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}
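
/*
 * Illustrative note (not part of the original file): MAP_TO_MV() converts
 * a voltage map step to millivolts: map 0 corresponds to 1250 mV and each
 * step drops 25 mV, so e.g. MAP_TO_MV(10) = 1250 - 250 = 1000 mV.
 */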

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	return 0;
}
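
/*
 * Illustrative note (not part of the original file): the poll above spins
 * for at most 50 iterations of udelay(10), roughly 500 us, waiting for
 * FORCEWAKE_ACK to clear; "RC information accurate" is reported only if
 * the ack cleared within that window (count stayed below 51).
 */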

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
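
/*
 * Illustrative note (not part of the original file): in the table printed
 * above, the GPU column is the pcode ratio times 50 MHz and the effective
 * CPU ("IA") column is the value returned through GEN6_PCODE_DATA times
 * 100 MHz, matching the gpu_freq * 50 / ia_freq * 100 scaling in the loop.
 */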

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return ret;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
  1268. seq_printf(m, "C0DRB3 = 0x%04x\n",
  1269. I915_READ16(C0DRB3));
  1270. seq_printf(m, "C1DRB3 = 0x%04x\n",
  1271. I915_READ16(C1DRB3));
  1272. } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
  1273. seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
  1274. I915_READ(MAD_DIMM_C0));
  1275. seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
  1276. I915_READ(MAD_DIMM_C1));
  1277. seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
  1278. I915_READ(MAD_DIMM_C2));
  1279. seq_printf(m, "TILECTL = 0x%08x\n",
  1280. I915_READ(TILECTL));
  1281. seq_printf(m, "ARB_MODE = 0x%08x\n",
  1282. I915_READ(ARB_MODE));
  1283. seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
  1284. I915_READ(DISP_ARB_CTL));
  1285. }
  1286. mutex_unlock(&dev->struct_mutex);
  1287. return 0;
  1288. }
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->ring[i];

		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
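
/*
 * Read back the DPIO sideband PLL registers. Only Valleyview has a
 * DPIO block, so every other platform simply reports "unsupported".
 */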
static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
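
/*
 * i915_wedged: reading reports the current wedged state; writing feeds
 * the value to i915_handle_error() to simulate a GPU hang and exercise
 * the error-handling path. For example, assuming debugfs is mounted at
 * the usual location and this is DRM minor 0:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */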
static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};
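
/*
 * i915_ring_stop: a bitmask of rings to stop submitting to, stored in
 * dev_priv->stop_rings for the ring code to consult; this appears to be
 * intended for test suites to provoke hangs and exercise hang recovery.
 * Reading returns the currently set mask.
 */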
static ssize_t
i915_ring_stop_read(struct file *filp,
		    char __user *ubuf,
		    size_t max,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[20];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "0x%08x\n", dev_priv->stop_rings);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_ring_stop_write(struct file *filp,
		     const char __user *ubuf,
		     size_t cnt,
		     loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 0;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

	mutex_lock(&dev->struct_mutex);
	dev_priv->stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static const struct file_operations i915_ring_stop_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_ring_stop_read,
	.write = i915_ring_stop_write,
	.llseek = default_llseek,
};
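
/*
 * i915_max_freq: clamp the RPS turbo ceiling. The *_delay fields count
 * in units that the code converts at 50 MHz per step, so values written
 * here are treated as MHz and rounded down to a multiple of 50.
 */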
static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "max freq: %d\n", dev_priv->max_delay * 50);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};
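
/*
 * i915_min_freq: the mirror image of i915_max_freq, raising the RPS
 * floor instead; the same MHz-to-50 MHz-step conversion applies.
 */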
static ssize_t
i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "min freq: %d\n", dev_priv->min_delay * 50);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	dev_priv->min_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_min_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_min_freq_read,
	.write = i915_min_freq_write,
	.llseek = default_llseek,
};
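
/*
 * i915_cache_sharing: expose the gen6 MBC snoop-control field. Values
 * 0-3 are written into GEN6_MBCUNIT_SNPCR to adjust the uncore cache
 * sharing policy; the write handler rejects anything out of range.
 */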
static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof(buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
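
/*
 * i915_forcewake_user: holding this file open keeps a forcewake
 * reference, so the GT cannot power-gate its registers while userspace
 * (for instance the intel-gpu-tools register utilities) is poking at
 * them. Opening takes the reference, releasing drops it; both are
 * no-ops before gen6, where there is no forcewake.
 */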
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/*
	 * It's bad that we can potentially hang userspace if struct_mutex gets
	 * forever stuck. However, if we cannot acquire this lock it means that
	 * almost certainly the driver has hung and is not unloadable. Therefore
	 * hanging here is probably a minor inconvenience not to be seen by
	 * almost every user.
	 */
	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}
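
/*
 * The read-only informational nodes, registered in bulk through
 * drm_debugfs_create_files(); the writable knobs above each get their
 * own file_operations and are created individually instead.
 */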
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
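
/*
 * Create the writable debugfs files one at a time, then register the
 * read-only info list in a single call; any error is returned to the
 * caller so the minor's debugfs setup can be aborted.
 */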
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_min_freq",
				  &i915_min_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_error_state",
				  &i915_error_state_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
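
/*
 * The fops pointers passed to drm_add_fake_info_node() were stored as
 * the fake nodes' info_ent keys, so the same pointers (cast to
 * drm_info_list) identify those files for removal here.
 */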
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */