/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
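
/*
 * i915_capabilities: dump the device generation, PCH type and the
 * boolean feature flags from struct intel_device_info.
 */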
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
	B(has_llc);
#undef B

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}
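
/*
 * describe_obj: emit a one-line summary of a GEM object -- pin and
 * tiling flags, size, read/write domains, last seqnos and cache level,
 * plus its flink name, fence register, GTT binding, mappable state and
 * owning ring when present.
 */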
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		seq_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		seq_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
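
/*
 * count_objects: accumulate object and mappable statistics over a list.
 * Note that it deliberately leans on locals declared in the caller:
 * obj, count, size, mappable_count and mappable_size.
 */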
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}
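
/*
 * i915_ring_seqno_info: report the current, waiter and IRQ seqnos for
 * one ring; shared by the seqno and interrupt debugfs files below.
 */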
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiter sequence (%s): %d\n",
			   ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s): %d\n",
			   ring->name, ring->irq_seqno);
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
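
/*
 * i915_dump_object: hexdump an object's contents word by word through a
 * write-combining mapping of its pages in the GTT aperture.
 */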
static void i915_dump_object(struct seq_file *m,
			     struct io_mapping *mapping,
			     struct drm_i915_gem_object *obj)
{
	int page, page_count, i;

	page_count = obj->base.size / PAGE_SIZE;
	for (page = 0; page < page_count; page++) {
		u32 *mem = io_mapping_map_wc(mapping,
					     obj->gtt_offset + page * PAGE_SIZE);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
		io_mapping_unmap(mem);
	}
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (!ring->obj) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		const u8 __iomem *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x :  %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (ring->size == 0)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, "  Size :    %08x\n", ring->size);
	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
	}
	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RING_RENDER: return " render";
	case RING_BSD: return " bsd";
	case RING_BLT: return " blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->seqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}
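
/*
 * i915_error_state: pretty-print the error state captured at the last
 * GPU hang -- per-ring registers, fence registers, the recorded active
 * and pinned buffer lists, and dumps of the captured batch and ring
 * buffers.
 */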
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "Blitter command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
		seq_printf(m, "Video (BSD) command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
	}
	seq_printf(m, "Render command stream:\n");
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
	}
	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
		if (error->ringbuffer[i]) {
			struct drm_i915_error_object *obj = error->ringbuffer[i];

			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
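
/*
 * i915_cur_delayinfo: report the current and requested P-state. On
 * Ironlake this comes from MEMSWCTL/MEMSTAT; on Gen6+ it comes from the
 * RPS registers, which requires holding forcewake while reading RPSTAT1.
 */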
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
						GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}
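
/*
 * gen6_drpc_info: report the RC6 configuration and the current RC state.
 * Forcewake must not be held while sampling GEN6_GT_CORE_STATUS, since
 * holding it keeps the core awake and makes the reported state
 * meaningless.
 */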
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
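
/*
 * i915_ring_freq_table: print the GPU/CPU frequency mapping by asking
 * the pcode mailbox for the minimum effective CPU (IA) frequency at
 * each GPU frequency between min_delay and max_delay.
 */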
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged :  %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};
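
/*
 * The i915_wedged file both reports the wedged status and, on write,
 * injects a GPU error. For example (the dri minor number may differ):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_wedged
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */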
  1268. static int
  1269. i915_max_freq_open(struct inode *inode,
  1270. struct file *filp)
  1271. {
  1272. filp->private_data = inode->i_private;
  1273. return 0;
  1274. }
  1275. static ssize_t
  1276. i915_max_freq_read(struct file *filp,
  1277. char __user *ubuf,
  1278. size_t max,
  1279. loff_t *ppos)
  1280. {
  1281. struct drm_device *dev = filp->private_data;
  1282. drm_i915_private_t *dev_priv = dev->dev_private;
  1283. char buf[80];
  1284. int len;
  1285. len = snprintf(buf, sizeof(buf),
  1286. "max freq: %d\n", dev_priv->max_delay * 50);
  1287. if (len > sizeof(buf))
  1288. len = sizeof(buf);
  1289. return simple_read_from_buffer(ubuf, max, ppos, buf, len);
  1290. }
  1291. static ssize_t
  1292. i915_max_freq_write(struct file *filp,
  1293. const char __user *ubuf,
  1294. size_t cnt,
  1295. loff_t *ppos)
  1296. {
  1297. struct drm_device *dev = filp->private_data;
  1298. struct drm_i915_private *dev_priv = dev->dev_private;
  1299. char buf[20];
  1300. int val = 1;
  1301. if (cnt > 0) {
  1302. if (cnt > sizeof(buf) - 1)
  1303. return -EINVAL;
  1304. if (copy_from_user(buf, ubuf, cnt))
  1305. return -EFAULT;
  1306. buf[cnt] = 0;
  1307. val = simple_strtoul(buf, NULL, 0);
  1308. }
  1309. DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
  1310. /*
  1311. * Turbo will still be enabled, but won't go above the set value.
  1312. */
  1313. dev_priv->max_delay = val / 50;
  1314. gen6_set_rps(dev, val / 50);
  1315. return cnt;
  1316. }
  1317. static const struct file_operations i915_max_freq_fops = {
  1318. .owner = THIS_MODULE,
  1319. .open = i915_max_freq_open,
  1320. .read = i915_max_freq_read,
  1321. .write = i915_max_freq_write,
  1322. .llseek = default_llseek,
  1323. };
static int
i915_cache_sharing_open(struct inode *inode,
			struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof(buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);
	/* As above, clamp to the formatted data, not the full buffer. */
	if (len > sizeof(buf) - 1)
		len = sizeof(buf) - 1;

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;
		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = i915_cache_sharing_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
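
/*
 * Each special file below is created with debugfs_create_file() and then
 * registered as a "fake" info node keyed on its file_operations, so that
 * i915_debugfs_cleanup() can remove it through the generic
 * drm_debugfs_remove_files() path using the same key.
 */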
static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}
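
/*
 * i915_forcewake_user: opening the file takes a forcewake reference that is
 * held until the file is closed, keeping the GT awake so userspace register
 * reads observe live values on GEN6+.  Hooking .release means the reference
 * is dropped even if the holding process dies.  A hedged usage sketch
 * (minor number is an assumption):
 *
 *	exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # hold awake
 *	exec 3<&-                                             # release
 */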
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/*
	 * It's bad that we can potentially hang userspace if struct_mutex gets
	 * forever stuck. However, if we cannot acquire this lock it means that
	 * almost certainly the driver has hung and is not unloadable. Therefore
	 * hanging here is probably a minor inconvenience not to be seen by
	 * almost every user.
	 */
	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_max_freq",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_max_freq_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
}

static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_cache_sharing",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_cache_sharing_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
}
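
/*
 * Read-only seq_file nodes: each entry names a debugfs file, the show
 * callback that prints it, and optional driver data (e.g. which object
 * list or ring to dump).  They are created and removed in bulk by the
 * drm_debugfs helpers below.
 */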
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};

#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
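
/*
 * Called when the DRM minor registers its debugfs directory: the special
 * writable/holdable files go in first, then the read-only info nodes are
 * created in one batch.  Any error is simply propagated to the caller.
 */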
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_wedged_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_max_freq_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
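
/*
 * The fops pointers double as the fake info-node keys registered via
 * drm_add_fake_info_node(), so the casts to struct drm_info_list * below
 * only supply the address that drm_debugfs_remove_files() compares against;
 * the struct contents are never dereferenced as an info list.
 */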
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */