/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "atom.h"
#include "rs690d.h"
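
/*
 * Note on the arithmetic in this file: fixed20_12 values are 20.12
 * fixed-point numbers (rfixed_const(x) stores x << 12, per radeon_fixed.h),
 * so the clocks and bandwidths below keep 1/4096 fractional precision
 * through the watermark math.
 */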

static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
                if (G_000090_MC_SYSTEM_IDLE(tmp))
                        return 0;
                udelay(1);
        }
        return -1;
}

static void rs690_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: HDP same place on rs690 ? */
        r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs690_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

void rs690_pm_info(struct radeon_device *rdev)
{
        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
        struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
        void *ptr;
        uint16_t data_offset;
        uint8_t frev, crev;
        fixed20_12 tmp;

        atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
                               &frev, &crev, &data_offset);
        ptr = rdev->mode_info.atom_context->bios + data_offset;
        info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
        info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
        /* Get various system information from the BIOS */
        switch (crev) {
        case 1:
                tmp.full = rfixed_const(100);
                rdev->pm.igp_sideport_mclk.full = rfixed_const(le32_to_cpu(info->ulBootUpMemoryClock));
                rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
                rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
                rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
                rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
                break;
        case 2:
                tmp.full = rfixed_const(100);
                rdev->pm.igp_sideport_mclk.full = rfixed_const(le32_to_cpu(info_v2->ulBootUpSidePortClock));
                rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
                rdev->pm.igp_system_mclk.full = rfixed_const(le32_to_cpu(info_v2->ulBootUpUMAClock));
                rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
                rdev->pm.igp_ht_link_clk.full = rfixed_const(le32_to_cpu(info_v2->ulHTLinkFreq));
                rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
                rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
                break;
        default:
                tmp.full = rfixed_const(100);
                /* We assume the slowest possible clock, i.e. worst case */
                /* DDR 333 MHz */
                rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
                /* FIXME: system clock ? */
                rdev->pm.igp_system_mclk.full = rfixed_const(100);
                rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
                rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
                rdev->pm.igp_ht_link_width.full = rfixed_const(8);
                DRM_ERROR("No integrated system info for your GPU, using safe defaults\n");
                break;
        }
        /* Compute various bandwidths */
        /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
        tmp.full = rfixed_const(4);
        rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
        /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
         *              = ht_clk * ht_width / 5
         */
        tmp.full = rfixed_const(5);
        rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
                                                rdev->pm.igp_ht_link_width);
        rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
        if (rdev->pm.ht_bandwidth.full < rdev->pm.max_bandwidth.full) {
                /* HT link is a limiting factor */
                rdev->pm.max_bandwidth.full = rdev->pm.ht_bandwidth.full;
        }
        /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
         *                    = (sideport_clk * 14) / 10
         */
        tmp.full = rfixed_const(14);
        rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
        tmp.full = rfixed_const(10);
        rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
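
/*
 * Worked example for the bandwidth formulas above (illustrative numbers,
 * not from any particular board): igp_system_mclk = 400 gives
 * k8_bandwidth = 400 * 4 = 1600; a 1000 MHz, 8-bit HT link gives
 * ht_bandwidth = 1000 * 8 / 5 = 1600; a 333 MHz sideport gives
 * sideport_bandwidth = 333 * 14 / 10 = 466. All values stay in the same
 * MB/s-scale units consumed by the watermark code below.
 */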

void rs690_mc_init(struct radeon_device *rdev)
{
        fixed20_12 a;
        u64 base;

        rs400_gart_adjust_size(rdev);
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
        rs690_pm_info(rdev);
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
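        /*
         * default_sclk is assumed to be in 10 kHz units (as reported by
         * radeon_get_clock_info()), so dividing by 100 leaves sclk in MHz
         * for the core bandwidth estimate below.
         */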
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
        a.full = rfixed_const(16);
        /* core_bandwidth = sclk(MHz) * 16 */
        rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        radeon_vram_location(rdev, &rdev->mc, base);
        radeon_gtt_location(rdev, &rdev->mc);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
                              struct drm_display_mode *mode1,
                              struct drm_display_mode *mode2)
{
        u32 tmp;

        /*
         * Line Buffer Setup
         * There is a single line buffer shared by both display controllers.
         * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared
         * between the display controllers. The partitioning can either be done
         * manually or via one of four preset allocations specified in bits 1:0:
         * 0 - line buffer is divided in half and shared between the CRTCs
         * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
         * 2 - D1 gets the whole buffer
         * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
         * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
         * allocation mode. In manual allocation mode, D1 always starts at 0,
         * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
         */
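        /*
         * For example (per the comparisons below): two enabled CRTCs showing
         * 2880- and 1920-wide modes select the 3/4 + 1/4 split, while two
         * 1920-wide modes fall back to the even half/half split.
         */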
        tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
        tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
        /* auto */
        if (mode1 && mode2) {
                if (mode1->hdisplay > mode2->hdisplay) {
                        if (mode1->hdisplay > 2560)
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
                        else
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else if (mode2->hdisplay > mode1->hdisplay) {
                        if (mode2->hdisplay > 2560)
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
                        else
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else
                        tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
        } else if (mode1) {
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
        } else if (mode2) {
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
        }
        WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}
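
/*
 * Per-CRTC watermark state, filled in by rs690_crtc_bandwidth_compute()
 * and consumed by rs690_bandwidth_update() when programming the display
 * priority registers.
 */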
struct rs690_watermark {
        u32 lb_request_fifo_depth;
        fixed20_12 num_line_pair;
        fixed20_12 estimated_width;
        fixed20_12 worst_case_latency;
        fixed20_12 consumption_rate;
        fixed20_12 active_time;
        fixed20_12 dbpp;
        fixed20_12 priority_mark_max;
        fixed20_12 priority_mark;
        fixed20_12 sclk;
};

void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                                  struct radeon_crtc *crtc,
                                  struct rs690_watermark *wm)
{
        struct drm_display_mode *mode = &crtc->base.mode;
        fixed20_12 a, b, c;
        fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
        fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
        /* FIXME: detect IGP with sideport memory, I don't think there is any
         * such product available
         */
        bool sideport = false;

        if (!crtc->base.enabled) {
                /* FIXME: wouldn't it be better to set the priority mark to maximum? */
                wm->lb_request_fifo_depth = 4;
                return;
        }
        if (crtc->vsc.full > rfixed_const(2))
                wm->num_line_pair.full = rfixed_const(2);
        else
                wm->num_line_pair.full = rfixed_const(1);
        b.full = rfixed_const(mode->crtc_hdisplay);
        c.full = rfixed_const(256);
        a.full = rfixed_div(b, c);
        request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
        request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
        if (a.full < rfixed_const(4)) {
                wm->lb_request_fifo_depth = 4;
        } else {
                wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
        }
        /* Determine consumption rate
         *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
         *  vtaps = number of vertical taps,
         *  vsc = vertical scaling ratio, defined as source/destination
         *  hsc = horizontal scaling ratio, defined as source/destination
         */
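        /*
         * Example (illustrative): a 148.5 MHz pixel clock arrives here as
         * mode->clock == 148500 (kHz), so pclk works out to
         * 1000 / 148.5 ~= 6.73 ns per pixel, carried in 20.12 fixed point.
         */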
        a.full = rfixed_const(mode->clock);
        b.full = rfixed_const(1000);
        a.full = rfixed_div(a, b);
        pclk.full = rfixed_div(b, a);
        if (crtc->rmx_type != RMX_OFF) {
                b.full = rfixed_const(2);
                if (crtc->vsc.full > b.full)
                        b.full = crtc->vsc.full;
                b.full = rfixed_mul(b, crtc->hsc);
                c.full = rfixed_const(2);
                b.full = rfixed_div(b, c);
                consumption_time.full = rfixed_div(pclk, b);
        } else {
                consumption_time.full = pclk.full;
        }
        a.full = rfixed_const(1);
        wm->consumption_rate.full = rfixed_div(a, consumption_time);
        /* Determine line time
         *  LineTime = total time for one line of display
         *  htotal = total number of horizontal pixels
         *  pclk = pixel clock period(ns)
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        line_time.full = rfixed_mul(a, pclk);
        /* Determine active time
         *  ActiveTime = time of active region of display within one line,
         *  hactive = total number of horizontal active pixels
         *  htotal = total number of horizontal pixels
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->active_time.full = rfixed_mul(line_time, b);
        wm->active_time.full = rfixed_div(wm->active_time, a);
        /* Maximum bandwidth is the minimum bandwidth of all components */
        rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
        if (sideport) {
                if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
                    rdev->pm.sideport_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
                read_delay_latency.full = rfixed_const(370 * 800 * 1000);
                read_delay_latency.full = rfixed_div(read_delay_latency,
                        rdev->pm.igp_sideport_mclk);
        } else {
                if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
                    rdev->pm.k8_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
                if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
                    rdev->pm.ht_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
                read_delay_latency.full = rfixed_const(5000);
        }
        /* sclk = system clock period(ns) = 1000 / (max_bandwidth * 16) */
        a.full = rfixed_const(16);
        rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
        a.full = rfixed_const(1000);
        rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
        /* Determine chunk time
         *  ChunkTime = the time it takes the DCP to send one chunk of data
         *              to the LB which consists of pipeline delay and inter
         *              chunk gap
         *  sclk = system clock period(ns)
         */
        a.full = rfixed_const(256 * 13);
        chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
        a.full = rfixed_const(10);
        chunk_time.full = rfixed_div(chunk_time, a);
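        /*
         * i.e. one chunk costs 256 * 1.3 = 332.8 sclk periods; the 13/10
         * factor appears to fold the pipeline delay and inter-chunk gap
         * into a flat 30% overhead on the 256-clock chunk (interpretation,
         * not from the original comments).
         */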
        /* Determine the worst case latency
         *  NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
         *  WorstCaseLatency = worst case time from urgent to when the MC starts
         *                     to return data
         *  READ_DELAY_IDLE_MAX = constant of 1us
         *  ChunkTime = time it takes the DCP to send one chunk of data to the LB
         *              which consists of pipeline delay and inter chunk gap
         */
        if (rfixed_trunc(wm->num_line_pair) > 1) {
                a.full = rfixed_const(3);
                wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        } else {
                a.full = rfixed_const(2);
                wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        }
        /* Determine the tolerable latency
         *  TolerableLatency = Any given request has only 1 line time
         *                     for the data to be returned
         *  LBRequestFifoDepth = Number of chunk requests the LB can
         *                       put into the request FIFO for a display
         *  LineTime = total time for one line of display
         *  ChunkTime = the time it takes the DCP to send one chunk
         *              of data to the LB which consists of
         *              pipeline delay and inter chunk gap
         */
        if ((2 + wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
                tolerable_latency.full = line_time.full;
        } else {
                tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
                tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
                tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
                tolerable_latency.full = line_time.full - tolerable_latency.full;
        }
        /* We assume worst case 32bits (4 bytes) */
        wm->dbpp.full = rfixed_const(4 * 8);
        /* Determine the maximum priority mark
         *  width = viewport width in pixels
         */
        a.full = rfixed_const(16);
        wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
        wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
        /* Determine estimated width */
        estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
        estimated_width.full = rfixed_div(estimated_width, consumption_time);
        if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
                wm->priority_mark.full = rfixed_const(10);
        } else {
                a.full = rfixed_const(16);
                wm->priority_mark.full = rfixed_div(estimated_width, a);
                wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
                wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
        }
}

void rs690_bandwidth_update(struct radeon_device *rdev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        struct rs690_watermark wm0;
        struct rs690_watermark wm1;
        u32 tmp;
        fixed20_12 priority_mark02, priority_mark12, fill_rate;
        fixed20_12 a, b;

        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
                mode1 = &rdev->mode_info.crtcs[1]->base.mode;
        /*
         * Raise the display0/1 read latency priority in the memory
         * controller if the user specifies HIGH for the displaypriority
         * option.
         */
        if (rdev->disp_priority == 2) {
                tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
                tmp &= C_000104_MC_DISP0R_INIT_LAT;
                tmp &= C_000104_MC_DISP1R_INIT_LAT;
                if (mode0)
                        tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
                if (mode1)
                        tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
                WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
        }
        rs690_line_buffer_adjust(rdev, mode0, mode1);
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
                WREG32(R_006C9C_DCP_CONTROL, 0);
        if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
                WREG32(R_006C9C_DCP_CONTROL, 2);
        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
        tmp = (wm0.lb_request_fifo_depth - 1);
        tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
        WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
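        /*
         * In each branch below the priority mark works out to roughly
         * (worst_case_latency * consumption_rate [+ shortfall * active_time])
         * / (16 * 1000): how far a display may fall behind, in 16-pixel
         * units, before its requests must be treated as urgent (an
         * interpretation of the math, not from the original comments).
         */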
        if (mode0 && mode1) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                if (rfixed_trunc(wm1.dbpp) > 64)
                        b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        b.full = wm1.num_line_pair.full;
                a.full += b.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        } else if (mode0) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
                        S_006D48_D2MODE_PRIORITY_A_OFF(1));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
                        S_006D4C_D2MODE_PRIORITY_B_OFF(1));
        } else {
                if (rfixed_trunc(wm1.dbpp) > 64)
                        a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        a.full = wm1.num_line_pair.full;
                fill_rate.full = rfixed_div(wm1.sclk, a);
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
                        S_006548_D1MODE_PRIORITY_A_OFF(1));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
                        S_00654C_D1MODE_PRIORITY_B_OFF(1));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        }
}
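
/*
 * MC registers on this chip are reached indirectly: the register offset
 * goes into MC_INDEX (with MC_IND_WR_EN set for writes), data moves
 * through MC_DATA, and the index field is cleared again afterwards.
 */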
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
        r = RREG32(R_00007C_MC_DATA);
        WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
        return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
                S_000078_MC_IND_WR_EN(1));
        WREG32(R_00007C_MC_DATA, v);
        WREG32(R_000078_MC_INDEX, 0x7F);
}

void rs690_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;

        /* Stop all MC clients */
        rv515_mc_stop(rdev, &save);
        /* Wait for the MC to go idle */
        if (rs690_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
        /* Program MC; the address space is limited to 32 bits */
        WREG32_MC(R_000100_MCCFG_FB_LOCATION,
                        S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
                        S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
        WREG32(R_000134_HDP_FB_LOCATION,
                S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
        rv515_mc_resume(rdev, &save);
}

static int rs690_startup(struct radeon_device *rdev)
{
        int r;

        rs690_mc_program(rdev);
        /* Resume clock */
        rv515_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs690_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

int rs690_resume(struct radeon_device *rdev)
{
        /* Make sure the GART is not enabled */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        atom_asic_init(rdev->mode_info.atom_context);
        /* Resume clock after posting */
        rv515_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return rs690_startup(rdev);
}

int rs690_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs690_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

int rs690_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        rv515_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r)
                        return r;
        } else {
                dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* initialize memory controller */
        rs690_mc_init(rdev);
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
        rdev->accel_working = true;
        r = rs690_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}