i915_sysfs.c

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */

	if (!intel_enable_rc6(dev))
		return 0;

	raw_time = I915_READ(reg) * 128ULL;
	return DIV_ROUND_UP_ULL(raw_time, 100000);
}
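
/*
 * Note on units (derived from the arithmetic above): each raw counter
 * tick corresponds to 128/100000 ms = 1.28 us. Worked example: a raw
 * count of 781250 ticks yields 781250 * 128 / 100000 = 1000, i.e. one
 * second of RC6 residency reported as 1000 ms.
 */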

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif
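
/*
 * Usage sketch (device path assumed for the first minor): the group
 * above is merged into the device's existing "power" directory, so the
 * residency counters can be read from userspace with e.g.:
 *
 *   $ cat /sys/class/drm/card0/power/rc6_residency_ms
 */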

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_GPU_CACHE(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	uint32_t misccpctl;
	int i, ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(int, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (IS_HASWELL(drm_dev)) {
		if (dev_priv->l3_parity.remap_info)
			memcpy(buf,
			       dev_priv->l3_parity.remap_info + (offset/4),
			       count);
		else
			memset(buf, 0, count);

		goto out;
	}

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	for (i = 0; i < count; i += 4)
		*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + offset + i);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info = temp;

	memcpy(dev_priv->l3_parity.remap_info + (offset/4), buf, count);

	i915_gem_l3_remap(drm_dev);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL
};
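
/*
 * Usage sketch (illustrative, path assumed): since l3_parity is a
 * binary attribute with 4-byte alignment enforced by l3_access_valid(),
 * the remap log can be dumped from userspace with e.g.:
 *
 *   $ dd if=/sys/class/drm/card0/l3_parity bs=4 count=32 | xxd
 */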

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv->mem_freq,
				     dev_priv->rps.rpe_delay));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
	else
		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (dev_priv->rps.cur_delay < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.min_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
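
/*
 * RP0/RP1/RPn are the render P-states reported in GEN6_RP_STATE_CAP:
 * RP0 is the maximum non-overclocked frequency, RP1 the "efficient"
 * frequency and RPn the minimum. Each 8-bit field is scaled by
 * GT_FREQUENCY_MULTIPLIER in gt_rp_mhz_show() below to report MHz.
 */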

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
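
/*
 * Usage sketch (path assumed for the first minor): these attributes sit
 * directly on the DRM device node, so the frequency range can be clamped
 * from userspace with e.g.:
 *
 *   # echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
 *   $ cat /sys/class/drm/card0/gt_cur_freq_mhz
 */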

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
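
/*
 * Usage sketch (illustrative): the captured error state can be saved
 * and then discarded; any write to the node clears the saved state:
 *
 *   $ cat /sys/class/drm/card0/error > gpu-hang.txt
 *   # echo 1 > /sys/class/drm/card0/error
 */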

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}