/* i915_sysfs.c */
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 * Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
  33. #ifdef CONFIG_PM
  34. static u32 calc_residency(struct drm_device *dev, const u32 reg)
  35. {
  36. struct drm_i915_private *dev_priv = dev->dev_private;
  37. u64 raw_time; /* 32b value may overflow during fixed point math */
  38. if (!intel_enable_rc6(dev))
  39. return 0;
  40. raw_time = I915_READ(reg) * 128ULL;
  41. return DIV_ROUND_UP_ULL(raw_time, 100000);
  42. }
  43. static ssize_t
  44. show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
  45. {
  46. struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
  47. return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
  48. }
  49. static ssize_t
  50. show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  51. {
  52. struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
  53. u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
  54. return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
  55. }
  56. static ssize_t
  57. show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  58. {
  59. struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
  60. u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
  61. return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
  62. }
  63. static ssize_t
  64. show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  65. {
  66. struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
  67. u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
  68. return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
  69. }
/* Read-only sysfs files exposing the RC6 mask and per-state residencies. */
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

/* Merged into the device's standard "power" group (see sysfs_merge_group). */
static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif
  86. static int l3_access_valid(struct drm_device *dev, loff_t offset)
  87. {
  88. if (!HAS_L3_GPU_CACHE(dev))
  89. return -EPERM;
  90. if (offset % 4 != 0)
  91. return -EINVAL;
  92. if (offset >= GEN7_L3LOG_SIZE)
  93. return -ENXIO;
  94. return 0;
  95. }
/*
 * Read the per-slice L3 parity remap table via the "l3_parity" bin file.
 *
 * On Haswell the cached software copy (dev_priv->l3_parity.remap_info)
 * is returned, zeros if nothing has been programmed yet; on other gens
 * the table is read back from the GEN7_L3LOG registers with DOP clock
 * gating temporarily disabled.  Returns bytes copied or -errno.
 */
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	uint32_t misccpctl;
	int slice = (int)(uintptr_t)attr->private; /* 0 or 1, set on the bin_attribute */
	int i, ret;

	/* The remap log is only accessible in whole dwords. */
	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	/* Clamp to the end of the log.  NOTE(review): min_t(int, ...)
	 * narrows the size_t count to int — presumably safe because sysfs
	 * bounds bin-file I/O by attr->size, but worth confirming. */
	count = min_t(int, GEN7_L3LOG_SIZE-offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (IS_HASWELL(drm_dev)) {
		/* HSW path: serve from the shadow copy, no register readback. */
		if (dev_priv->l3_parity.remap_info[slice])
			memcpy(buf,
			       dev_priv->l3_parity.remap_info[slice] + (offset/4),
			       count);
		else
			memset(buf, 0, count);

		goto out;
	}

	/* DOP clock gating must be off while the L3 log registers are read. */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	for (i = 0; i < count; i += 4)
		*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + offset + i);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
/*
 * Write (part of) the per-slice L3 parity remap table via sysfs.
 *
 * The data is stored in dev_priv->l3_parity.remap_info[slice] — allocated
 * on first use — and then programmed into hardware through
 * i915_gem_l3_remap() after idling the GPU.  Returns @count or -errno.
 */
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	/* First write to this slice: allocate the shadow table up front so
	 * the failure paths below only need a single kfree(temp). */
	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* The remap table must not change underneath an active GPU. */
	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	/* NOTE(review): unlike the read path, count is not rounded/clamped
	 * here — presumably sysfs guarantees offset+count <= attr->size;
	 * confirm against kernfs/sysfs bin-file semantics. */
	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	i915_gem_l3_remap(drm_dev, slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
/* Bin file "l3_parity": L3 remap table for slice 0 (private = slice index). */
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

/* Bin file "l3_parity_slice_1": same interface for the second L3 slice. */
static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
  192. static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
  193. struct device_attribute *attr, char *buf)
  194. {
  195. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  196. struct drm_device *dev = minor->dev;
  197. struct drm_i915_private *dev_priv = dev->dev_private;
  198. int ret;
  199. mutex_lock(&dev_priv->rps.hw_lock);
  200. if (IS_VALLEYVIEW(dev_priv->dev)) {
  201. u32 freq;
  202. freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
  203. ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
  204. } else {
  205. ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
  206. }
  207. mutex_unlock(&dev_priv->rps.hw_lock);
  208. return snprintf(buf, PAGE_SIZE, "%d\n", ret);
  209. }
  210. static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
  211. struct device_attribute *attr, char *buf)
  212. {
  213. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  214. struct drm_device *dev = minor->dev;
  215. struct drm_i915_private *dev_priv = dev->dev_private;
  216. return snprintf(buf, PAGE_SIZE, "%d\n",
  217. vlv_gpu_freq(dev_priv->mem_freq,
  218. dev_priv->rps.rpe_delay));
  219. }
  220. static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  221. {
  222. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  223. struct drm_device *dev = minor->dev;
  224. struct drm_i915_private *dev_priv = dev->dev_private;
  225. int ret;
  226. mutex_lock(&dev_priv->rps.hw_lock);
  227. if (IS_VALLEYVIEW(dev_priv->dev))
  228. ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
  229. else
  230. ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
  231. mutex_unlock(&dev_priv->rps.hw_lock);
  232. return snprintf(buf, PAGE_SIZE, "%d\n", ret);
  233. }
/*
 * sysfs write: set the user maximum GPU frequency (MHz).
 *
 * The value is converted to hardware units (punit opcode on VLV,
 * 50 MHz steps otherwise), range-checked against hardware limits and
 * the current user minimum, applied immediately if the current
 * frequency exceeds the new cap, and stored as rps.max_delay.
 * Returns @count on success or -errno.
 */
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max; /* no overclock range on VLV */
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);         /* RP0 */
		hw_min = ((rp_state_cap & 0xff0000) >> 16); /* RPn */
	}

	/* Reject anything outside hardware limits or below the user minimum. */
	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	/* Clamp the current frequency down to the new cap right away. */
	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
  277. static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  278. {
  279. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  280. struct drm_device *dev = minor->dev;
  281. struct drm_i915_private *dev_priv = dev->dev_private;
  282. int ret;
  283. mutex_lock(&dev_priv->rps.hw_lock);
  284. if (IS_VALLEYVIEW(dev_priv->dev))
  285. ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
  286. else
  287. ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
  288. mutex_unlock(&dev_priv->rps.hw_lock);
  289. return snprintf(buf, PAGE_SIZE, "%d\n", ret);
  290. }
  291. static ssize_t gt_min_freq_mhz_store(struct device *kdev,
  292. struct device_attribute *attr,
  293. const char *buf, size_t count)
  294. {
  295. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  296. struct drm_device *dev = minor->dev;
  297. struct drm_i915_private *dev_priv = dev->dev_private;
  298. u32 val, rp_state_cap, hw_max, hw_min;
  299. ssize_t ret;
  300. ret = kstrtou32(buf, 0, &val);
  301. if (ret)
  302. return ret;
  303. mutex_lock(&dev_priv->rps.hw_lock);
  304. if (IS_VALLEYVIEW(dev)) {
  305. val = vlv_freq_opcode(dev_priv->mem_freq, val);
  306. hw_max = valleyview_rps_max_freq(dev_priv);
  307. hw_min = valleyview_rps_min_freq(dev_priv);
  308. } else {
  309. val /= GT_FREQUENCY_MULTIPLIER;
  310. rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
  311. hw_max = dev_priv->rps.hw_max;
  312. hw_min = ((rp_state_cap & 0xff0000) >> 16);
  313. }
  314. if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
  315. mutex_unlock(&dev_priv->rps.hw_lock);
  316. return -EINVAL;
  317. }
  318. if (dev_priv->rps.cur_delay < val) {
  319. if (IS_VALLEYVIEW(dev))
  320. valleyview_set_rps(dev, val);
  321. else
  322. gen6_set_rps(dev_priv->dev, val);
  323. }
  324. dev_priv->rps.min_delay = val;
  325. mutex_unlock(&dev_priv->rps.hw_lock);
  326. return count;
  327. }
/* RPS frequency controls: cur is read-only, max/min are user-writable. */
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

/* Forward declaration: one show routine serves all three RP attributes,
 * dispatching on which dev_attr it was invoked for. */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
  336. /* For now we have a static number of RP states */
  337. static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
  338. {
  339. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  340. struct drm_device *dev = minor->dev;
  341. struct drm_i915_private *dev_priv = dev->dev_private;
  342. u32 val, rp_state_cap;
  343. ssize_t ret;
  344. ret = mutex_lock_interruptible(&dev->struct_mutex);
  345. if (ret)
  346. return ret;
  347. rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
  348. mutex_unlock(&dev->struct_mutex);
  349. if (attr == &dev_attr_gt_RP0_freq_mhz) {
  350. val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
  351. } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
  352. val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
  353. } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
  354. val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
  355. } else {
  356. BUG();
  357. }
  358. return snprintf(buf, PAGE_SIZE, "%d\n", val);
  359. }
/* RPS attribute set registered on gen6+ (non-Valleyview) devices. */
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

/* RPS attribute set registered on Valleyview: rpe instead of RP0/1/n. */
static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
/*
 * Read the captured GPU error state via the "error" bin file.
 *
 * Takes a reference on the current error state, renders it into a string
 * buffer starting at @off, and copies up to @count bytes to @buf.
 * Returns bytes copied (possibly 0) or -errno.
 */
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	/* The string buffer is sized/positioned for this read window. */
	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	/* Drop the error-state reference and free the string buffer on all paths. */
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}
  403. static ssize_t error_state_write(struct file *file, struct kobject *kobj,
  404. struct bin_attribute *attr, char *buf,
  405. loff_t off, size_t count)
  406. {
  407. struct device *kdev = container_of(kobj, struct device, kobj);
  408. struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
  409. struct drm_device *dev = minor->dev;
  410. int ret;
  411. DRM_DEBUG_DRIVER("Resetting error state\n");
  412. ret = mutex_lock_interruptible(&dev->struct_mutex);
  413. if (ret)
  414. return ret;
  415. i915_destroy_error_state(dev);
  416. mutex_unlock(&dev->struct_mutex);
  417. return count;
  418. }
/* Bin file "error": read dumps the GPU error state, write clears it.
 * .size = 0 means the file size is unbounded/unknown to sysfs. */
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
/*
 * Register all i915 sysfs files on the primary minor's device node.
 *
 * Registration failures are logged with DRM_ERROR but are not fatal —
 * the driver continues with whatever subset of files succeeded.
 */
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	/* RC6 residency files, merged into the standard "power" group. */
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	/* L3 parity remap files; second slice only where present. */
	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(&dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	/* RPS frequency controls: VLV gets its own set, otherwise gen6+. */
	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
/*
 * Remove the sysfs files registered by i915_setup_sysfs(), in reverse
 * order.  Removal is unconditional: sysfs removal of files that were
 * never created (e.g. gen6 attrs, slice-1 parity) is harmless here.
 */
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}