mdp4_crtc.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	struct drm_plane *plane;
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct work_struct pageflip_work;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp4_irq vblank;
	struct mdp4_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(priv->kms);
}
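
/* push the outgoing scanout fb onto the unref queue, take a reference on
 * the incoming one, and (for synchronous updates) arm the vblank irq so
 * the old fb is released once scanout has moved on:
 */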
static void update_fb(struct drm_crtc *crtc, bool async,
		struct drm_framebuffer *new_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp4_crtc->fb;

	if (old_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp4_crtc->base.fb = new_fb;
	mdp4_crtc->fb = new_fb;

	if (!async) {
		/* enable vblank to pick up the old_fb */
		mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
	}
}
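
/* send the pending page-flip event (or destroy it if the flip was
 * canceled), under event_lock since this can race with the vblank irq:
 */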
static void complete_flip(struct drm_crtc *crtc, bool canceled)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		if (canceled)
			event->base.destroy(&event->base);
		else
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
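
/* write the flush bits for our pipe and overlay engine so the hw picks
 * up the updated (double-buffered) register state:
 */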
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t flush = 0;

	flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
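
/* deferred from mdp4_crtc_page_flip() until the new fb's bo is no longer
 * busy; point the plane at the new fb, flush, and arm vblank to complete
 * the flip:
 */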
static void pageflip_worker(struct work_struct *work)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, pageflip_work);
	struct drm_crtc *crtc = &mdp4_crtc->base;

	mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
	crtc_flush(crtc);

	/* enable vblank to complete flip: */
	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
}

static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_fb_work);
	struct drm_device *dev = mdp4_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}
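
/* flip-work callback: drop the iova + obj references taken when the
 * cursor bo started scanning out:
 */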
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp4_crtc->name, mode);

	if (enabled != mdp4_crtc->enabled) {
		if (enabled) {
			mdp4_enable(mdp4_kms);
			mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
		} else {
			mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
			mdp4_disable(mdp4_kms);
		}
		mdp4_crtc->enabled = enabled;
	}
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
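
/* statically configure the overlay stages for constant FG/BG alpha (no
 * per-pixel blending or transparency keying) and route our pipe into the
 * appropriate layer mixer:
 */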
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int i, ovlp = mdp4_crtc->ovlp;
	uint32_t mixer_cfg = 0;

	/*
	 * This probably would also need to be triggered by any attached
	 * plane when it changes.. for now since we are only using a single
	 * private plane, the configuration is hard-coded:
	 */

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	for (i = 0; i < 4; i++) {
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
				MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
				MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	/* TODO single register for all CRTCs, so this won't work properly
	 * when multiple CRTCs are active..
	 */
	switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
	case VG1:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
		break;
	case VG2:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
		break;
	case RGB1:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
		break;
	case RGB2:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
		break;
	case RGB3:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
		break;
	case VG3:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
		break;
	case VG4:
		mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
			COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
		break;
	default:
		WARN_ON("invalid pipe");
		break;
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
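
/* program the DMA engine and overlay for the new mode; note that the
 * scanout address comes from the attached pipe (SRC_BASE is zero), which
 * is configured via mdp4_plane_mode_set():
 */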
static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ret, ovlp = mdp4_crtc->ovlp;

	mode = adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
			crtc->fb->pitches[0]);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
			crtc->fb->pitches[0]);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	update_fb(crtc, false, crtc->fb);

	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp4_crtc->name, ret);
		return ret;
	}

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}

	return 0;
}

static void mdp4_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s", mdp4_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp4_enable(get_kms(crtc));
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp4_disable(get_kms(crtc));
}

static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_plane *plane = mdp4_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;

	update_fb(crtc, false, crtc->fb);

	return mdp4_plane_mode_set(plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
}

static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}
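
/* just record the event and new fb; the actual register update is
 * deferred to pageflip_worker once the new fb's gem bo is idle:
 */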
static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *new_fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *obj;

	if (mdp4_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	obj = msm_framebuffer_bo(new_fb, 0);

	mdp4_crtc->event = event;
	update_fb(crtc, true, new_fb);

	return msm_gem_queue_inactive_work(obj,
			&mdp4_crtc->pageflip_work);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct mdp4_kms *mdp4_kms = get_kms(crtc);
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
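
/* stash the new cursor bo/size and mark the state stale; update_cursor()
 * applies it from the vblank irq (see comment above) to avoid underflow:
 */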
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width = width;
	mdp4_crtc->cursor.height = height;
	mdp4_crtc->cursor.stale = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		msm_gem_put_iova(old_bo, mdp4_kms->id);
		drm_gem_object_unreference_unlocked(old_bo);
	}

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(x) |
			MDP4_DMA_CURSOR_POS_Y(y));

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = mdp4_crtc_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.dpms = mdp4_crtc_dpms,
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set = mdp4_crtc_mode_set,
	.prepare = mdp4_crtc_prepare,
	.commit = mdp4_crtc_commit,
	.mode_set_base = mdp4_crtc_mode_set_base,
	.load_lut = mdp4_crtc_load_lut,
};
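
/* vblank irq: apply stale cursor state, complete any pending flip, then
 * disarm ourselves and commit the deferred fb/cursor unrefs:
 */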
static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;

	update_cursor(crtc);
	complete_flip(crtc, false);
	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);

	drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
	drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
}

static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
{
	complete_flip(crtc, true);
}

/* set dma config, ie. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_LCDC_DTV) {
		mdp4_crtc->mixer = 1;
	}

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};
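
/* A minimal usage sketch for the constructor below, assuming a call site
 * like the one in mdp4_kms.c (the pipe/dma arguments here are
 * illustrative, not verbatim):
 *
 *	plane = mdp4_plane_init(dev, RGB2, true);
 *	if (IS_ERR(plane))
 *		return PTR_ERR(plane);
 *	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_E);
 */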
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;
	int ret;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc) {
		ret = -ENOMEM;
		goto fail;
	}

	crtc = &mdp4_crtc->base;

	mdp4_crtc->plane = plane;
	mdp4_crtc->plane->crtc = crtc;

	/* id is used by complete_flip() for drm_send_vblank_event(), so it
	 * must be recorded here:
	 */
	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
			"unref fb", unref_fb_worker);
	if (ret)
		goto fail;

	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
			"unref cursor", unref_cursor_worker);
	if (ret)
		goto fail;

	INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);

	drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);

	return crtc;

fail:
	if (crtc)
		mdp4_crtc_destroy(crtc);

	return ERR_PTR(ret);
}