/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <linux/jiffies.h>

#include "drmP.h"
#include "radeon.h"
/* Interval (ms) at which the idle work handler re-checks GPU load. */
#define RADEON_IDLE_LOOP_MS 100
/* Time (ms) a planned reclock must stay valid before it is applied. */
#define RADEON_RECLOCK_DELAY_MS 200

/* Forward declarations for the power-management helpers defined below. */
static void radeon_pm_check_limits(struct radeon_device *rdev);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_reclock_work_handler(struct work_struct *work);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
/* Human-readable names indexed by rdev->pm.state (enum radeon_pm_state). */
static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};

/* Human-readable names indexed by power_state type
 * (enum radeon_pm_state_type). */
static const char *pm_state_types[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
  46. static void radeon_print_power_mode_info(struct radeon_device *rdev)
  47. {
  48. int i, j;
  49. bool is_default;
  50. DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
  51. for (i = 0; i < rdev->pm.num_power_states; i++) {
  52. if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
  53. is_default = true;
  54. else
  55. is_default = false;
  56. DRM_INFO("State %d %s %s\n", i,
  57. pm_state_types[rdev->pm.power_state[i].type],
  58. is_default ? "(default)" : "");
  59. if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
  60. DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
  61. DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
  62. for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
  63. if (rdev->flags & RADEON_IS_IGP)
  64. DRM_INFO("\t\t%d engine: %d\n",
  65. j,
  66. rdev->pm.power_state[i].clock_info[j].sclk * 10);
  67. else
  68. DRM_INFO("\t\t%d engine/memory: %d/%d\n",
  69. j,
  70. rdev->pm.power_state[i].clock_info[j].sclk * 10,
  71. rdev->pm.power_state[i].clock_info[j].mclk * 10);
  72. }
  73. }
  74. }
  75. static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
  76. enum radeon_pm_state_type type)
  77. {
  78. int i;
  79. struct radeon_power_state *power_state = NULL;
  80. switch (type) {
  81. case POWER_STATE_TYPE_DEFAULT:
  82. default:
  83. return rdev->pm.default_power_state;
  84. case POWER_STATE_TYPE_POWERSAVE:
  85. for (i = 0; i < rdev->pm.num_power_states; i++) {
  86. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_POWERSAVE) {
  87. power_state = &rdev->pm.power_state[i];
  88. break;
  89. }
  90. }
  91. if (power_state == NULL) {
  92. for (i = 0; i < rdev->pm.num_power_states; i++) {
  93. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY) {
  94. power_state = &rdev->pm.power_state[i];
  95. break;
  96. }
  97. }
  98. }
  99. break;
  100. case POWER_STATE_TYPE_BATTERY:
  101. for (i = 0; i < rdev->pm.num_power_states; i++) {
  102. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY) {
  103. power_state = &rdev->pm.power_state[i];
  104. break;
  105. }
  106. }
  107. if (power_state == NULL) {
  108. for (i = 0; i < rdev->pm.num_power_states; i++) {
  109. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_POWERSAVE) {
  110. power_state = &rdev->pm.power_state[i];
  111. break;
  112. }
  113. }
  114. }
  115. break;
  116. case POWER_STATE_TYPE_BALANCED:
  117. case POWER_STATE_TYPE_PERFORMANCE:
  118. for (i = 0; i < rdev->pm.num_power_states; i++) {
  119. if (rdev->pm.power_state[i].type == type) {
  120. power_state = &rdev->pm.power_state[i];
  121. break;
  122. }
  123. }
  124. break;
  125. }
  126. if (power_state == NULL)
  127. return rdev->pm.default_power_state;
  128. return power_state;
  129. }
  130. static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
  131. struct radeon_power_state *power_state,
  132. enum radeon_pm_clock_mode_type type)
  133. {
  134. switch (type) {
  135. case POWER_MODE_TYPE_DEFAULT:
  136. default:
  137. return power_state->default_clock_mode;
  138. case POWER_MODE_TYPE_LOW:
  139. return &power_state->clock_info[0];
  140. case POWER_MODE_TYPE_MID:
  141. if (power_state->num_clock_modes > 2)
  142. return &power_state->clock_info[1];
  143. else
  144. return &power_state->clock_info[0];
  145. break;
  146. case POWER_MODE_TYPE_HIGH:
  147. return &power_state->clock_info[power_state->num_clock_modes - 1];
  148. }
  149. }
  150. static void radeon_get_power_state(struct radeon_device *rdev,
  151. enum radeon_pm_action action)
  152. {
  153. switch (action) {
  154. case PM_ACTION_NONE:
  155. default:
  156. rdev->pm.requested_power_state = rdev->pm.current_power_state;
  157. rdev->pm.requested_power_state->requested_clock_mode =
  158. rdev->pm.requested_power_state->current_clock_mode;
  159. break;
  160. case PM_ACTION_MINIMUM:
  161. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
  162. rdev->pm.requested_power_state->requested_clock_mode =
  163. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
  164. break;
  165. case PM_ACTION_DOWNCLOCK:
  166. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
  167. rdev->pm.requested_power_state->requested_clock_mode =
  168. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
  169. break;
  170. case PM_ACTION_UPCLOCK:
  171. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
  172. rdev->pm.requested_power_state->requested_clock_mode =
  173. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
  174. break;
  175. }
  176. }
  177. static void radeon_set_power_state(struct radeon_device *rdev)
  178. {
  179. if (rdev->pm.requested_power_state == rdev->pm.current_power_state)
  180. return;
  181. /* set pcie lanes */
  182. /* set voltage */
  183. /* set engine clock */
  184. radeon_set_engine_clock(rdev, rdev->pm.requested_power_state->requested_clock_mode->sclk);
  185. /* set memory clock */
  186. rdev->pm.current_power_state = rdev->pm.requested_power_state;
  187. }
/*
 * One-time power-management setup: parse the BIOS power tables, derive
 * clock limits, register the debugfs file, initialize the work items
 * and, if the radeon_dynpm module parameter asks for it, arm dynamic
 * reclocking in the PAUSED state. Returns 0 (currently cannot fail).
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.downclocked = false;
	rdev->pm.vblank_callback = false;

	if (rdev->bios) {
		/* Power tables are laid out differently for ATOM vs COMBIOS. */
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_print_power_mode_info(rdev);
	}

	radeon_pm_check_limits(rdev);

	if (radeon_debugfs_pm_init(rdev)) {
		/* Non-fatal: PM works without the debugfs file. */
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	/* Work items must be initialized before anything can queue them. */
	INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	/* radeon_dynpm is a module parameter; -1 presumably means
	 * "driver default" (off here) — TODO confirm at its definition. */
	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}
/*
 * Derive the minimum clocks used when downclocking from the BIOS
 * default clocks. The 5000 margin is in the same units as
 * default_sclk/default_mclk (apparently 10 kHz, i.e. 50 MHz).
 * NOTE(review): if a default clock were ever below 5000 this would
 * underflow — confirm the field types and realistic value ranges.
 */
static void radeon_pm_check_limits(struct radeon_device *rdev)
{
	rdev->pm.min_gpu_engine_clock = rdev->clock.default_sclk - 5000;
	rdev->pm.min_gpu_memory_clock = rdev->clock.default_mclk - 5000;
}
/*
 * Re-evaluate the PM state machine after a display configuration
 * change. Counts connectors that are bound to an encoder and not
 * DPMS-off, recording their CRTCs in pm.active_crtcs:
 *  - more than one active head: pause dynamic PM and force clocks
 *    back up (reclocking with multiple scanouts is avoided),
 *  - exactly one head: (re)activate dynamic PM and start the idle work,
 *  - no heads: drop to minimum clocks.
 * Takes pm.mutex; each branch is responsible for releasing it.
 */
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* Rebuild the active-CRTC bitmask from the connector list. */
	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
		&ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			wait_queue_head_t wait;
			init_waitqueue_head(&wait);

			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			/* Ask the vblank path to apply the upclock. */
			rdev->pm.vblank_callback = true;

			mutex_unlock(&rdev->pm.mutex);

			/* NOTE(review): nothing ever wakes this local wait
			 * queue, so this is effectively a 300 ms sleep
			 * (the condition is only sampled on entry) before
			 * forcing the clocks directly — confirm intent. */
			wait_event_timeout(wait, !rdev->pm.downclocked,
				msecs_to_jiffies(300));
			if (!rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		} else {
			mutex_unlock(&rdev->pm.mutex);
		}
	} else if (count == 1) {
		/* Per-mode minimums; currently identical to the GPU-wide
		 * minimums computed in radeon_pm_check_limits(). */
		rdev->pm.min_mode_engine_clock = rdev->pm.min_gpu_engine_clock;
		rdev->pm.min_mode_memory_clock = rdev->pm.min_gpu_memory_clock;
		/* TODO: Increase clocks if needed for current mode */
		if (rdev->pm.state == PM_STATE_MINIMUM) {
			/* Leaving minimum: upclock now, then poll for idle. */
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks_locked(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		}
		else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}

		mutex_unlock(&rdev->pm.mutex);
	}
	else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks_locked(rdev);
		}

		mutex_unlock(&rdev->pm.mutex);
	}
}
  284. static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
  285. {
  286. /*radeon_fence_wait_last(rdev);*/
  287. switch (rdev->pm.planned_action) {
  288. case PM_ACTION_UPCLOCK:
  289. radeon_set_engine_clock(rdev, rdev->clock.default_sclk);
  290. rdev->pm.downclocked = false;
  291. break;
  292. case PM_ACTION_DOWNCLOCK:
  293. radeon_set_engine_clock(rdev,
  294. rdev->pm.min_mode_engine_clock);
  295. rdev->pm.downclocked = true;
  296. break;
  297. case PM_ACTION_MINIMUM:
  298. radeon_set_engine_clock(rdev,
  299. rdev->pm.min_gpu_engine_clock);
  300. break;
  301. case PM_ACTION_NONE:
  302. DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
  303. break;
  304. }
  305. rdev->pm.planned_action = PM_ACTION_NONE;
  306. }
  307. static void radeon_pm_set_clocks(struct radeon_device *rdev)
  308. {
  309. mutex_lock(&rdev->pm.mutex);
  310. /* new VBLANK irq may come before handling previous one */
  311. if (rdev->pm.vblank_callback) {
  312. mutex_lock(&rdev->cp.mutex);
  313. if (rdev->pm.req_vblank & (1 << 0)) {
  314. rdev->pm.req_vblank &= ~(1 << 0);
  315. drm_vblank_put(rdev->ddev, 0);
  316. }
  317. if (rdev->pm.req_vblank & (1 << 1)) {
  318. rdev->pm.req_vblank &= ~(1 << 1);
  319. drm_vblank_put(rdev->ddev, 1);
  320. }
  321. rdev->pm.vblank_callback = false;
  322. radeon_pm_set_clocks_locked(rdev);
  323. mutex_unlock(&rdev->cp.mutex);
  324. }
  325. mutex_unlock(&rdev->pm.mutex);
  326. }
  327. static void radeon_pm_reclock_work_handler(struct work_struct *work)
  328. {
  329. struct radeon_device *rdev;
  330. rdev = container_of(work, struct radeon_device,
  331. pm.reclock_work);
  332. radeon_pm_set_clocks(rdev);
  333. }
  334. static void radeon_pm_idle_work_handler(struct work_struct *work)
  335. {
  336. struct radeon_device *rdev;
  337. rdev = container_of(work, struct radeon_device,
  338. pm.idle_work.work);
  339. mutex_lock(&rdev->pm.mutex);
  340. if (rdev->pm.state == PM_STATE_ACTIVE &&
  341. !rdev->pm.vblank_callback) {
  342. unsigned long irq_flags;
  343. int not_processed = 0;
  344. read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
  345. if (!list_empty(&rdev->fence_drv.emited)) {
  346. struct list_head *ptr;
  347. list_for_each(ptr, &rdev->fence_drv.emited) {
  348. /* count up to 3, that's enought info */
  349. if (++not_processed >= 3)
  350. break;
  351. }
  352. }
  353. read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
  354. if (not_processed >= 3) { /* should upclock */
  355. if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
  356. rdev->pm.planned_action = PM_ACTION_NONE;
  357. } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
  358. rdev->pm.downclocked) {
  359. rdev->pm.planned_action =
  360. PM_ACTION_UPCLOCK;
  361. rdev->pm.action_timeout = jiffies +
  362. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  363. }
  364. } else if (not_processed == 0) { /* should downclock */
  365. if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
  366. rdev->pm.planned_action = PM_ACTION_NONE;
  367. } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
  368. !rdev->pm.downclocked) {
  369. rdev->pm.planned_action =
  370. PM_ACTION_DOWNCLOCK;
  371. rdev->pm.action_timeout = jiffies +
  372. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  373. }
  374. }
  375. if (rdev->pm.planned_action != PM_ACTION_NONE &&
  376. jiffies > rdev->pm.action_timeout) {
  377. if (rdev->pm.active_crtcs & (1 << 0)) {
  378. rdev->pm.req_vblank |= (1 << 0);
  379. drm_vblank_get(rdev->ddev, 0);
  380. }
  381. if (rdev->pm.active_crtcs & (1 << 1)) {
  382. rdev->pm.req_vblank |= (1 << 1);
  383. drm_vblank_get(rdev->ddev, 1);
  384. }
  385. rdev->pm.vblank_callback = true;
  386. }
  387. }
  388. mutex_unlock(&rdev->pm.mutex);
  389. queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
  390. msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
  391. }
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
/*
 * Dump the current PM state and clocks to the radeon_pm_info file.
 * The "%u0 kHz" format prints the raw value followed by a literal
 * '0' — i.e. it displays value * 10, consistent with clocks being
 * stored in 10 kHz units elsewhere in this file.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	/* Not every ASIC exposes a memory-clock getter. */
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));

	return 0;
}

/* Single debugfs entry registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
/*
 * Register the radeon_pm_info debugfs file. Returns 0 on success or
 * when debugfs is compiled out; otherwise propagates the error from
 * radeon_debugfs_add_files().
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}