i915_sysfs.c

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */

        if (!intel_enable_rc6(dev))
                return 0;

        /* The residency counter ticks every 1.28us: * 128 / 100000 is ms. */
        raw_time = I915_READ(reg) * 128ULL;
        return DIV_ROUND_UP_ULL(raw_time, 100000);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
        &dev_attr_rc6p_residency_ms.attr,
        &dev_attr_rc6pp_residency_ms.attr,
        NULL
};

static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs = rc6_attrs
};
#endif
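
/*
 * The group above is merged into the device's existing "power" directory,
 * so the files appear as e.g. /sys/class/drm/card0/power/rc6_residency_ms
 * (the card0 path is an assumption; the minor number depends on probe
 * order). A sketch of typical userspace use:
 *
 *   $ cat /sys/class/drm/card0/power/rc6_enable
 *   $ cat /sys/class/drm/card0/power/rc6_residency_ms
 */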

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
        if (!HAS_L3_GPU_CACHE(dev))
                return -EPERM;

        if (offset % 4 != 0)
                return -EINVAL;

        if (offset >= GEN7_L3LOG_SIZE)
                return -ENXIO;

        return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        uint32_t misccpctl;
        int i, ret;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        /* L3 log reads require DOP clock gating to be disabled. */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

        /* buf[0] corresponds to file position 'offset' (see note above). */
        for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
                *((uint32_t *)(&buf[i - offset])) = I915_READ(GEN7_L3LOG_BASE + i);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        mutex_unlock(&drm_dev->struct_mutex);

        return i - offset;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int ret;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (!dev_priv->l3_parity.remap_info) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
                        return -ENOMEM;
                }
        }

        ret = i915_gpu_idle(drm_dev);
        if (ret) {
                kfree(temp);
                mutex_unlock(&drm_dev->struct_mutex);
                return ret;
        }

        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
         */
        if (temp)
                dev_priv->l3_parity.remap_info = temp;

        /* Only the destination is offset; buf already starts at 'offset'. */
        memcpy(dev_priv->l3_parity.remap_info + (offset/4), buf, count);

        i915_gem_l3_remap(drm_dev);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static struct bin_attribute dpf_attrs = {
        .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL
};
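
/*
 * "l3_parity" is a GEN7_L3LOG_SIZE-byte binary file directly under the card
 * device (e.g. /sys/class/drm/card0/l3_parity, assuming card0). Reads return
 * the hardware L3 log; writes install a remap table of u32 entries at
 * 4-byte-aligned offsets and apply it via i915_gem_l3_remap().
 */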

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
        } else {
                ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
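
/*
 * Frequencies are exposed in MHz but tracked internally as RPS "delay"
 * steps. On gen6+ one step is GT_FREQUENCY_MULTIPLIER (50) MHz, so e.g.
 * writing 1100 to gt_max_freq_mhz below stores delay 22; Valleyview instead
 * converts through vlv_gpu_freq()/vlv_freq_opcode().
 */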

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
        else
                ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                val = vlv_freq_opcode(dev_priv->mem_freq, val);

                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
                non_oc_max = hw_max;
        } else {
                val /= GT_FREQUENCY_MULTIPLIER;

                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.hw_max;
                non_oc_max = (rp_state_cap & 0xff);
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }

        if (val < hw_min || val > hw_max ||
            val < dev_priv->rps.min_delay) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        if (val > non_oc_max)
                DRM_DEBUG("User requested overclocking to %d\n",
                          val * GT_FREQUENCY_MULTIPLIER);

        if (dev_priv->rps.cur_delay > val) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, val);
                else
                        gen6_set_rps(dev_priv->dev, val);
        }

        dev_priv->rps.max_delay = val;

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
        else
                ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (IS_VALLEYVIEW(dev)) {
                val = vlv_freq_opcode(dev_priv->mem_freq, val);

                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
        } else {
                val /= GT_FREQUENCY_MULTIPLIER;

                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.hw_max;
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }

        if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        if (dev_priv->rps.cur_delay < val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                        gen6_set_rps(dev_priv->dev, val);
        }

        dev_priv->rps.min_delay = val;

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap;
        ssize_t ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        mutex_unlock(&dev->struct_mutex);

        if (attr == &dev_attr_gt_RP0_freq_mhz) {
                val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
        } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
                val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
        } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
                val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
        } else {
                BUG();
        }

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
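
/*
 * RP0 is the maximum non-overclocked frequency, RP1 the "efficient"
 * frequency and RPn the minimum; all three are decoded from the byte
 * fields of GEN6_RP_STATE_CAP read above.
 */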

static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        NULL,
};
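
/*
 * These files sit directly under the card device. A sketch of typical use
 * (paths assume the device is card0; the write requires root):
 *
 *   $ cat /sys/class/drm/card0/gt_cur_freq_mhz
 *   # echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
 */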

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count)
{
        struct device *kdev = container_of(kobj, struct device, kobj);
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
        int ret;

        memset(&error_priv, 0, sizeof(error_priv));

        ret = i915_error_state_buf_init(&error_str, count, off);
        if (ret)
                return ret;

        error_priv.dev = dev;
        i915_error_state_get(dev, &error_priv);

        ret = i915_error_state_to_str(&error_str, &error_priv);
        if (ret)
                goto out;

        ret_count = count < error_str.bytes ? count : error_str.bytes;

        memcpy(buf, error_str.buf, ret_count);
out:
        i915_error_state_put(&error_priv);
        i915_error_state_buf_release(&error_str);

        return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        struct device *kdev = container_of(kobj, struct device, kobj);
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        int ret;

        DRM_DEBUG_DRIVER("Resetting error state\n");

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_destroy_error_state(dev);
        mutex_unlock(&dev->struct_mutex);

        return count;
}

static struct bin_attribute error_state_attr = {
        .attr.name = "error",
        .attr.mode = S_IRUSR | S_IWUSR,
        .size = 0,
        .read = error_state_read,
        .write = error_state_write,
};
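
/*
 * "error" dumps the most recent GPU hang state; writing anything to it
 * clears that state. For example (assuming card0; reads require root):
 *
 *   # cat /sys/class/drm/card0/error
 *   # echo 1 > /sys/class/drm/card0/error
 */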

void i915_setup_sysfs(struct drm_device *dev)
{
        int ret;

#ifdef CONFIG_PM
        if (INTEL_INFO(dev)->gen >= 6) {
                ret = sysfs_merge_group(&dev->primary->kdev.kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
#endif
        if (HAS_L3_GPU_CACHE(dev)) {
                ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
                if (ret)
                        DRM_ERROR("gen6 sysfs setup failed\n");
        }

        ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
        sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
        sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
        device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}