/* radeon_pm.c */
  1. /*
  2. * Permission is hereby granted, free of charge, to any person obtaining a
  3. * copy of this software and associated documentation files (the "Software"),
  4. * to deal in the Software without restriction, including without limitation
  5. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  6. * and/or sell copies of the Software, and to permit persons to whom the
  7. * Software is furnished to do so, subject to the following conditions:
  8. *
  9. * The above copyright notice and this permission notice shall be included in
  10. * all copies or substantial portions of the Software.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  13. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  15. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  16. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  17. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  18. * OTHER DEALINGS IN THE SOFTWARE.
  19. *
  20. * Authors: Rafał Miłecki <zajec5@gmail.com>
  21. * Alex Deucher <alexdeucher@gmail.com>
  22. */
  23. #include "drmP.h"
  24. #include "radeon.h"
  25. #define RADEON_IDLE_LOOP_MS 100
  26. #define RADEON_RECLOCK_DELAY_MS 200
  27. static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
  28. static void radeon_pm_set_clocks(struct radeon_device *rdev);
  29. static void radeon_pm_reclock_work_handler(struct work_struct *work);
  30. static void radeon_pm_idle_work_handler(struct work_struct *work);
  31. static int radeon_debugfs_pm_init(struct radeon_device *rdev);
/* Human-readable names indexed by the PM_STATE_* value in rdev->pm.state. */
static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};
/* Human-readable names indexed by enum radeon_pm_state_type
 * (power_state[i].type), used when dumping the BIOS power tables. */
static const char *pm_state_types[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
  45. static void radeon_print_power_mode_info(struct radeon_device *rdev)
  46. {
  47. int i, j;
  48. bool is_default;
  49. DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
  50. for (i = 0; i < rdev->pm.num_power_states; i++) {
  51. if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
  52. is_default = true;
  53. else
  54. is_default = false;
  55. DRM_INFO("State %d %s %s\n", i,
  56. pm_state_types[rdev->pm.power_state[i].type],
  57. is_default ? "(default)" : "");
  58. if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
  59. DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
  60. DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
  61. for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
  62. if (rdev->flags & RADEON_IS_IGP)
  63. DRM_INFO("\t\t%d engine: %d\n",
  64. j,
  65. rdev->pm.power_state[i].clock_info[j].sclk * 10);
  66. else
  67. DRM_INFO("\t\t%d engine/memory: %d/%d\n",
  68. j,
  69. rdev->pm.power_state[i].clock_info[j].sclk * 10,
  70. rdev->pm.power_state[i].clock_info[j].mclk * 10);
  71. }
  72. }
  73. }
  74. static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
  75. enum radeon_pm_state_type type)
  76. {
  77. int i;
  78. struct radeon_power_state *power_state = NULL;
  79. switch (type) {
  80. case POWER_STATE_TYPE_DEFAULT:
  81. default:
  82. return rdev->pm.default_power_state;
  83. case POWER_STATE_TYPE_POWERSAVE:
  84. for (i = 0; i < rdev->pm.num_power_states; i++) {
  85. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_POWERSAVE) {
  86. power_state = &rdev->pm.power_state[i];
  87. break;
  88. }
  89. }
  90. if (power_state == NULL) {
  91. for (i = 0; i < rdev->pm.num_power_states; i++) {
  92. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY) {
  93. power_state = &rdev->pm.power_state[i];
  94. break;
  95. }
  96. }
  97. }
  98. break;
  99. case POWER_STATE_TYPE_BATTERY:
  100. for (i = 0; i < rdev->pm.num_power_states; i++) {
  101. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY) {
  102. power_state = &rdev->pm.power_state[i];
  103. break;
  104. }
  105. }
  106. if (power_state == NULL) {
  107. for (i = 0; i < rdev->pm.num_power_states; i++) {
  108. if (rdev->pm.power_state[i].type == POWER_STATE_TYPE_POWERSAVE) {
  109. power_state = &rdev->pm.power_state[i];
  110. break;
  111. }
  112. }
  113. }
  114. break;
  115. case POWER_STATE_TYPE_BALANCED:
  116. case POWER_STATE_TYPE_PERFORMANCE:
  117. for (i = 0; i < rdev->pm.num_power_states; i++) {
  118. if (rdev->pm.power_state[i].type == type) {
  119. power_state = &rdev->pm.power_state[i];
  120. break;
  121. }
  122. }
  123. break;
  124. }
  125. if (power_state == NULL)
  126. return rdev->pm.default_power_state;
  127. return power_state;
  128. }
  129. static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
  130. struct radeon_power_state *power_state,
  131. enum radeon_pm_clock_mode_type type)
  132. {
  133. switch (type) {
  134. case POWER_MODE_TYPE_DEFAULT:
  135. default:
  136. return power_state->default_clock_mode;
  137. case POWER_MODE_TYPE_LOW:
  138. return &power_state->clock_info[0];
  139. case POWER_MODE_TYPE_MID:
  140. if (power_state->num_clock_modes > 2)
  141. return &power_state->clock_info[1];
  142. else
  143. return &power_state->clock_info[0];
  144. break;
  145. case POWER_MODE_TYPE_HIGH:
  146. return &power_state->clock_info[power_state->num_clock_modes - 1];
  147. }
  148. }
  149. static void radeon_get_power_state(struct radeon_device *rdev,
  150. enum radeon_pm_action action)
  151. {
  152. switch (action) {
  153. case PM_ACTION_NONE:
  154. default:
  155. rdev->pm.requested_power_state = rdev->pm.current_power_state;
  156. rdev->pm.requested_power_state->requested_clock_mode =
  157. rdev->pm.requested_power_state->current_clock_mode;
  158. break;
  159. case PM_ACTION_MINIMUM:
  160. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
  161. rdev->pm.requested_power_state->requested_clock_mode =
  162. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
  163. break;
  164. case PM_ACTION_DOWNCLOCK:
  165. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
  166. rdev->pm.requested_power_state->requested_clock_mode =
  167. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
  168. break;
  169. case PM_ACTION_UPCLOCK:
  170. rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
  171. rdev->pm.requested_power_state->requested_clock_mode =
  172. radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
  173. break;
  174. }
  175. DRM_INFO("Requested: e: %d m: %d p: %d\n",
  176. rdev->pm.requested_power_state->requested_clock_mode->sclk,
  177. rdev->pm.requested_power_state->requested_clock_mode->mclk,
  178. rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
  179. }
  180. static void radeon_set_power_state(struct radeon_device *rdev)
  181. {
  182. if (rdev->pm.requested_power_state == rdev->pm.current_power_state)
  183. return;
  184. DRM_INFO("Setting: e: %d m: %d p: %d\n",
  185. rdev->pm.requested_power_state->requested_clock_mode->sclk,
  186. rdev->pm.requested_power_state->requested_clock_mode->mclk,
  187. rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
  188. /* set pcie lanes */
  189. /* set voltage */
  190. /* set engine clock */
  191. radeon_set_engine_clock(rdev, rdev->pm.requested_power_state->requested_clock_mode->sclk);
  192. /* set memory clock */
  193. rdev->pm.current_power_state = rdev->pm.requested_power_state;
  194. }
  195. int radeon_pm_init(struct radeon_device *rdev)
  196. {
  197. rdev->pm.state = PM_STATE_DISABLED;
  198. rdev->pm.planned_action = PM_ACTION_NONE;
  199. rdev->pm.downclocked = false;
  200. rdev->pm.vblank_callback = false;
  201. if (rdev->bios) {
  202. if (rdev->is_atom_bios)
  203. radeon_atombios_get_power_modes(rdev);
  204. else
  205. radeon_combios_get_power_modes(rdev);
  206. radeon_print_power_mode_info(rdev);
  207. }
  208. if (radeon_debugfs_pm_init(rdev)) {
  209. DRM_ERROR("Failed to register debugfs file for PM!\n");
  210. }
  211. INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
  212. INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
  213. if (radeon_dynpm != -1 && radeon_dynpm) {
  214. rdev->pm.state = PM_STATE_PAUSED;
  215. DRM_INFO("radeon: dynamic power management enabled\n");
  216. }
  217. DRM_INFO("radeon: power management initialized\n");
  218. return 0;
  219. }
/*
 * Recompute the dynamic PM state after a display configuration change
 * (mode set / DPMS).  Counts CRTCs that are actively scanning out:
 *
 *   >1 CRTC : reclocking would disturb multiple displays, so pause
 *             dynamic PM and force clocks back up;
 *   ==1 CRTC: (re)activate dynamic PM and kick the idle poll;
 *   ==0 CRTC: nothing is displayed, drop straight to minimum clocks.
 *
 * No-op while PM is disabled.  Takes rdev->pm.mutex.
 */
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* Rebuild the bitmask of CRTCs driving a non-off connector. */
	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			/* NOTE(review): nothing ever wakes this local wait
			 * queue, so the wait_event_timeout() below is
			 * effectively a 300 ms sleep unless the condition is
			 * already true on entry — confirm intent. */
			wait_queue_head_t wait;
			init_waitqueue_head(&wait);

			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			/* Ask for an upclock; the vblank irq path performs
			 * the actual reclock (vblank_callback). */
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			rdev->pm.vblank_callback = true;

			mutex_unlock(&rdev->pm.mutex);

			wait_event_timeout(wait, !rdev->pm.downclocked,
					   msecs_to_jiffies(300));
			/* Vblank callback didn't upclock in time: do it
			 * directly. */
			if (!rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		} else {
			mutex_unlock(&rdev->pm.mutex);
		}
	} else if (count == 1) {
		/* TODO: Increase clocks if needed for current mode */
		if (rdev->pm.state == PM_STATE_MINIMUM) {
			/* Coming from the no-display state: upclock now and
			 * start the idle poll. */
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks_locked(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		}
		else if (rdev->pm.state == PM_STATE_PAUSED) {
			/* Resume the idle poll without touching clocks. */
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}

		mutex_unlock(&rdev->pm.mutex);
	}
	else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks_locked(rdev);
		}

		mutex_unlock(&rdev->pm.mutex);
	}
}
  283. static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
  284. {
  285. /*radeon_fence_wait_last(rdev);*/
  286. switch (rdev->pm.planned_action) {
  287. case PM_ACTION_UPCLOCK:
  288. radeon_get_power_state(rdev, PM_ACTION_UPCLOCK);
  289. rdev->pm.downclocked = false;
  290. break;
  291. case PM_ACTION_DOWNCLOCK:
  292. radeon_get_power_state(rdev, PM_ACTION_DOWNCLOCK);
  293. rdev->pm.downclocked = true;
  294. break;
  295. case PM_ACTION_MINIMUM:
  296. radeon_get_power_state(rdev, PM_ACTION_MINIMUM);
  297. break;
  298. case PM_ACTION_NONE:
  299. radeon_get_power_state(rdev, PM_ACTION_NONE);
  300. DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
  301. break;
  302. }
  303. radeon_set_power_state(rdev);
  304. rdev->pm.planned_action = PM_ACTION_NONE;
  305. }
  306. static void radeon_pm_set_clocks(struct radeon_device *rdev)
  307. {
  308. mutex_lock(&rdev->pm.mutex);
  309. /* new VBLANK irq may come before handling previous one */
  310. if (rdev->pm.vblank_callback) {
  311. mutex_lock(&rdev->cp.mutex);
  312. if (rdev->pm.req_vblank & (1 << 0)) {
  313. rdev->pm.req_vblank &= ~(1 << 0);
  314. drm_vblank_put(rdev->ddev, 0);
  315. }
  316. if (rdev->pm.req_vblank & (1 << 1)) {
  317. rdev->pm.req_vblank &= ~(1 << 1);
  318. drm_vblank_put(rdev->ddev, 1);
  319. }
  320. rdev->pm.vblank_callback = false;
  321. radeon_pm_set_clocks_locked(rdev);
  322. mutex_unlock(&rdev->cp.mutex);
  323. }
  324. mutex_unlock(&rdev->pm.mutex);
  325. }
  326. static void radeon_pm_reclock_work_handler(struct work_struct *work)
  327. {
  328. struct radeon_device *rdev;
  329. rdev = container_of(work, struct radeon_device,
  330. pm.reclock_work);
  331. radeon_pm_set_clocks(rdev);
  332. }
  333. static void radeon_pm_idle_work_handler(struct work_struct *work)
  334. {
  335. struct radeon_device *rdev;
  336. rdev = container_of(work, struct radeon_device,
  337. pm.idle_work.work);
  338. mutex_lock(&rdev->pm.mutex);
  339. if (rdev->pm.state == PM_STATE_ACTIVE &&
  340. !rdev->pm.vblank_callback) {
  341. unsigned long irq_flags;
  342. int not_processed = 0;
  343. read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
  344. if (!list_empty(&rdev->fence_drv.emited)) {
  345. struct list_head *ptr;
  346. list_for_each(ptr, &rdev->fence_drv.emited) {
  347. /* count up to 3, that's enought info */
  348. if (++not_processed >= 3)
  349. break;
  350. }
  351. }
  352. read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
  353. if (not_processed >= 3) { /* should upclock */
  354. if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
  355. rdev->pm.planned_action = PM_ACTION_NONE;
  356. } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
  357. rdev->pm.downclocked) {
  358. rdev->pm.planned_action =
  359. PM_ACTION_UPCLOCK;
  360. rdev->pm.action_timeout = jiffies +
  361. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  362. }
  363. } else if (not_processed == 0) { /* should downclock */
  364. if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
  365. rdev->pm.planned_action = PM_ACTION_NONE;
  366. } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
  367. !rdev->pm.downclocked) {
  368. rdev->pm.planned_action =
  369. PM_ACTION_DOWNCLOCK;
  370. rdev->pm.action_timeout = jiffies +
  371. msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
  372. }
  373. }
  374. if (rdev->pm.planned_action != PM_ACTION_NONE &&
  375. jiffies > rdev->pm.action_timeout) {
  376. if (rdev->pm.active_crtcs & (1 << 0)) {
  377. rdev->pm.req_vblank |= (1 << 0);
  378. drm_vblank_get(rdev->ddev, 0);
  379. }
  380. if (rdev->pm.active_crtcs & (1 << 1)) {
  381. rdev->pm.req_vblank |= (1 << 1);
  382. drm_vblank_get(rdev->ddev, 1);
  383. }
  384. rdev->pm.vblank_callback = true;
  385. }
  386. }
  387. mutex_unlock(&rdev->pm.mutex);
  388. queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
  389. msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
  390. }
  391. /*
  392. * Debugfs info
  393. */
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "radeon_pm_info": dump the PM state and the default/current
 * engine and memory clocks.
 *
 * Clocks appear to be stored in 10 kHz units, so the "%u0 kHz" format
 * appends a literal '0' to display kHz without multiplying — TODO
 * confirm the unit against the clock table parsers.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	/* Not every ASIC provides a memory clock query. */
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	return 0;
}

/* Table of debugfs entries registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
/*
 * Register the PM debugfs entries.  Returns the result of
 * radeon_debugfs_add_files(), or 0 when CONFIG_DEBUG_FS is disabled.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}