gma_display.c

/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>

#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}

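/*
 * Note: gma_wait_for_vblank() is a fixed 20 ms delay rather than a wait on
 * the pipe's real vblank event; the callers below use it only as a coarse
 * settling delay around plane/pipe enable and disable.
 */
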
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!crtc->fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;

	start = psbfb->gtt->offset;
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);

	REG_WRITE(map->stride, crtc->fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((gma_crtc->lut_r[i] +
				    gma_crtc->lut_adj[i]) << 16) |
				  ((gma_crtc->lut_g[i] +
				    gma_crtc->lut_adj[i]) << 8) |
				  (gma_crtc->lut_b[i] +
				   gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				((gma_crtc->lut_r[i] +
				  gma_crtc->lut_adj[i]) << 16) |
				((gma_crtc->lut_g[i] +
				  gma_crtc->lut_adj[i]) << 8) |
				(gma_crtc->lut_b[i] +
				 gma_crtc->lut_adj[i]);
		}
	}
}

void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
			u32 start, u32 size)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int i;
	int end = (start + size > 256) ? 256 : start + size;

	for (i = start; i < end; i++) {
		gma_crtc->lut_r[i] = red[i] >> 8;
		gma_crtc->lut_g[i] = green[i] >> 8;
		gma_crtc->lut_b[i] = blue[i] >> 8;
	}

	gma_crtc_load_lut(crtc);
}

/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_vblank_off(dev, pipe);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;

		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}

		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Prevent overflow */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
	return ret;

unref_cursor:
	drm_gem_object_unreference(obj);
	return ret;
}

int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
			 const struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{
	return true;
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->fb) {
		gt = to_psb_fb(crtc->fb)->gtt;
		psb_gtt_unpin(gt);
	}
}

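/*
 * The helpers above are meant to be plugged into a drm_crtc_helper_funcs
 * table by the chip-specific CRTC code. A minimal sketch of the usual wiring
 * (the table name and the chip-specific mode_set callback are illustrative,
 * not part of this file):
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.dpms		= gma_crtc_dpms,
 *		.mode_fixup	= gma_crtc_mode_fixup,
 *		.mode_set	= foo_crtc_mode_set,	(chip specific)
 *		.mode_set_base	= gma_pipe_set_base,
 *		.prepare	= gma_crtc_prepare,
 *		.commit		= gma_crtc_commit,
 *		.disable	= gma_crtc_disable,
 *	};
 */
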
void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

int gma_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

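/*
 * Similarly, the non-helper CRTC entry points typically end up in the
 * driver's drm_crtc_funcs table; a sketch under the same caveat (the table
 * name is illustrative and driver-specific members are omitted):
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.cursor_set	= gma_crtc_cursor_set,
 *		.cursor_move	= gma_crtc_cursor_move,
 *		.gamma_set	= gma_crtc_gamma_set,
 *		.set_config	= gma_crtc_set_config,
 *		.destroy	= gma_crtc_destroy,
 *	};
 */
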
/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

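/*
 * gma_encoder_prepare()/gma_encoder_commit() are intended to be wired into
 * an output's drm_encoder_helper_funcs (.prepare/.commit), and
 * gma_encoder_destroy() into drm_encoder_funcs.destroy, by the chip-specific
 * output setup code.
 */
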
/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

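/*
 * A sketch of the expected use from an output init path (the funcs tables
 * and the LVDS connector/encoder types here are illustrative only):
 *
 *	drm_connector_init(dev, &gma_connector->base, &foo_connector_funcs,
 *			   DRM_MODE_CONNECTOR_LVDS);
 *	drm_encoder_init(dev, &gma_encoder->base, &foo_encoder_funcs,
 *			 DRM_MODE_ENCODER_LVDS);
 *	gma_connector_attach_encoder(gma_connector, gma_encoder);
 */
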
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 *	depending on the multiplier, connector, etc.,
	 *	rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
					to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
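
/*
 * A sketch of how a chip-specific mode_set path is expected to use the PLL
 * helpers above; assumes the clock_funcs table also provides a limit()
 * lookup, and the local variable names are illustrative:
 *
 *	const struct gma_limit_t *limit =
 *		gma_crtc->clock_funcs->limit(crtc, refclk);
 *	struct gma_clock_t clock;
 *
 *	if (!gma_find_best_pll(limit, crtc, adjusted_mode->clock,
 *			       refclk, &clock))
 *		return 0;	(no valid divider combination was found)
 *
 *	The resulting clock.m1/m2/n/p1/p2 values (and clock.dot filled in by
 *	clock_funcs->clock()) are then programmed into the FP and DPLL
 *	registers for the pipe.
 */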