apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *          v
 *      write_regs()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
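
/*
 * A minimal sketch of how a caller might drive this pipeline, using only
 * entry points defined in this file (the real callers are the overlay and
 * manager ops elsewhere in the driver; locking and error handling are
 * omitted, so treat this as illustrative rather than a verbatim caller):
 *
 *      struct omap_overlay_info info;
 *
 *      dss_ovl_get_info(ovl, &info);
 *      info.enabled = true;
 *      dss_ovl_set_info(ovl, &info);        overlay/manager_info updated
 *      omap_dss_mgr_apply(ovl->manager);    copied to the info cache and, if
 *                                           DISPC is not busy, on to the
 *                                           shadow registers (GO bit set)
 *      dss_mgr_wait_for_go(ovl->manager);   wait until the HW has taken the
 *                                           shadow registers into use
 */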

struct ovl_priv_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;

        bool enabled;

        struct omap_overlay_info info;

        enum omap_channel channel;

        u32 fifo_low;
        u32 fifo_high;
};

struct mgr_priv_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;

        struct omap_overlay_manager_info info;

        bool manual_update;
        bool do_manual_update;
};

static struct {
        struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
        struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

        bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
        return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
        return &dss_data.mgr_priv_data_array[mgr->id];
}

void dss_apply_init(void)
{
        spin_lock_init(&data_lock);
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
        return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
        return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static int overlay_enabled(struct omap_overlay *ovl)
{
        return ovl->info.enabled && ovl->manager && ovl->manager->device;
}

int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct mgr_priv_data *mp;
        u32 irq;
        int r;
        int i;
        struct omap_dss_device *dssdev = mgr->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (mgr_manual_update(mgr))
                return 0;

        irq = dispc_mgr_get_vsync_irq(mgr->id);

        mp = get_mgr_priv(mgr);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&data_lock, flags);
                dirty = mp->dirty;
                shadow_dirty = mp->shadow_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("mgr(%d)->wait_for_go() not finishing\n",
                                        mgr->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
                        break;
                }
        }

        return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct ovl_priv_data *op;
        struct omap_dss_device *dssdev;
        u32 irq;
        int r;
        int i;

        if (!ovl->manager)
                return 0;

        dssdev = ovl->manager->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (ovl_manual_update(ovl))
                return 0;

        irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

        op = get_ovl_priv(ovl);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&data_lock, flags);
                dirty = op->dirty;
                shadow_dirty = op->shadow_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("ovl(%d)->wait_for_go() not finishing\n",
                                        ovl->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
                        break;
                }
        }

        return r;
}

static int dss_ovl_write_regs(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op;
        struct omap_overlay_info *oi;
        bool ilace, replication;
        int r;

        DSSDBGF("%d", ovl->id);

        op = get_ovl_priv(ovl);
        oi = &op->info;

        if (!op->enabled) {
                dispc_ovl_enable(ovl->id, 0);
                return 0;
        }

        replication = dss_use_replication(ovl->manager->device, oi->color_mode);

        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

        dispc_ovl_set_channel_out(ovl->id, op->channel);

        r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
        if (r) {
                /* this shouldn't happen */
                DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
                dispc_ovl_enable(ovl->id, 0);
                return r;
        }

        dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

        dispc_ovl_enable(ovl->id, 1);

        return 0;
}

static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp;
        struct omap_overlay_manager_info *mi;

        DSSDBGF("%d", mgr->id);

        mp = get_mgr_priv(mgr);
        mi = &mp->info;

        dispc_mgr_setup(mgr->id, mi);
}

/* dss_write_regs() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers. */
static int dss_write_regs(void)
{
        struct omap_overlay *ovl;
        struct omap_overlay_manager *mgr;
        struct ovl_priv_data *op;
        struct mgr_priv_data *mp;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;
        int r;
        bool mgr_busy[MAX_DSS_MANAGERS];
        bool mgr_go[MAX_DSS_MANAGERS];
        bool busy;

        r = 0;
        busy = false;

        for (i = 0; i < num_mgrs; i++) {
                mgr_busy[i] = dispc_mgr_go_busy(i);
                mgr_go[i] = false;
        }

        /* Commit overlay settings */
        for (i = 0; i < num_ovls; ++i) {
                ovl = omap_dss_get_overlay(i);
                op = get_ovl_priv(ovl);

                if (!op->dirty)
                        continue;

                mp = get_mgr_priv(ovl->manager);

                if (mp->manual_update && !mp->do_manual_update)
                        continue;

                if (mgr_busy[op->channel]) {
                        busy = true;
                        continue;
                }

                r = dss_ovl_write_regs(ovl);
                if (r)
                        DSSERR("dss_ovl_write_regs %d failed\n", i);

                op->dirty = false;
                op->shadow_dirty = true;
                mgr_go[op->channel] = true;
        }

        /* Commit manager settings */
        for (i = 0; i < num_mgrs; ++i) {
                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->dirty)
                        continue;

                if (mp->manual_update && !mp->do_manual_update)
                        continue;

                if (mgr_busy[i]) {
                        busy = true;
                        continue;
                }

                dss_mgr_write_regs(mgr);
                mp->dirty = false;
                mp->shadow_dirty = true;
                mgr_go[i] = true;
        }

        /* set GO */
        for (i = 0; i < num_mgrs; ++i) {
                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mgr_go[i])
                        continue;

                /* We don't need GO with manual update display. LCD iface will
                 * always be turned off after frame, and new settings will be
                 * taken in to use at next update */
                if (!mp->manual_update)
                        dispc_mgr_go(i);
        }

        if (busy)
                r = 1;
        else
                r = 0;

        return r;
}

void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        struct ovl_priv_data *op;
        struct omap_overlay *ovl;

        mp->do_manual_update = true;
        dss_write_regs();
        mp->do_manual_update = false;

        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);
                op->shadow_dirty = false;
        }

        mp->shadow_dirty = false;

        dispc_mgr_enable(mgr->id, true);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        u32 mask;
        int r, i;

        mask = 0;
        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_vsync_irq(i);

        r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
        WARN_ON(r);

        dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        u32 mask;
        int r, i;

        mask = 0;
        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_vsync_irq(i);

        r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
        WARN_ON(r);

        dss_data.irq_enabled = false;
}

static void dss_apply_irq_handler(void *data, u32 mask)
{
        struct omap_overlay *ovl;
        struct omap_overlay_manager *mgr;
        struct mgr_priv_data *mp;
        struct ovl_priv_data *op;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i, r;
        bool mgr_busy[MAX_DSS_MANAGERS];

        for (i = 0; i < num_mgrs; i++)
                mgr_busy[i] = dispc_mgr_go_busy(i);

        spin_lock(&data_lock);

        for (i = 0; i < num_ovls; ++i) {
                ovl = omap_dss_get_overlay(i);
                op = get_ovl_priv(ovl);
                if (!mgr_busy[op->channel])
                        op->shadow_dirty = false;
        }

        for (i = 0; i < num_mgrs; ++i) {
                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);
                if (!mgr_busy[i])
                        mp->shadow_dirty = false;
        }

        r = dss_write_regs();
        if (r == 1)
                goto end;

        /* re-read busy flags */
        for (i = 0; i < num_mgrs; i++)
                mgr_busy[i] = dispc_mgr_go_busy(i);

        /* keep running as long as there are busy managers, so that
         * we can collect overlay-applied information */
        for (i = 0; i < num_mgrs; ++i) {
                if (mgr_busy[i])
                        goto end;
        }

        dss_unregister_vsync_isr();

end:
        spin_unlock(&data_lock);
}

static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op;

        op = get_ovl_priv(ovl);

        if (ovl->manager_changed) {
                ovl->manager_changed = false;
                ovl->info_dirty = true;
        }

        if (!overlay_enabled(ovl)) {
                if (op->enabled) {
                        op->enabled = false;
                        op->dirty = true;
                }
                return;
        }

        if (!ovl->info_dirty)
                return;

        ovl->info_dirty = false;
        op->dirty = true;
        op->info = ovl->info;

        op->channel = ovl->manager->id;

        op->enabled = true;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp;

        mp = get_mgr_priv(mgr);

        if (mgr->device_changed) {
                mgr->device_changed = false;
                mgr->info_dirty = true;
        }

        if (!mgr->info_dirty)
                return;

        if (!mgr->device)
                return;

        mgr->info_dirty = false;
        mp->dirty = true;
        mp->info = mgr->info;

        mp->manual_update = mgr_manual_update(mgr);
}

static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op;
        struct omap_dss_device *dssdev;
        u32 size, burst_size;

        op = get_ovl_priv(ovl);

        if (!op->enabled)
                return;

        dssdev = ovl->manager->device;

        size = dispc_ovl_get_fifo_size(ovl->id);

        burst_size = dispc_ovl_get_burst_size(ovl->id);

        switch (dssdev->type) {
        case OMAP_DISPLAY_TYPE_DPI:
        case OMAP_DISPLAY_TYPE_DBI:
        case OMAP_DISPLAY_TYPE_SDI:
        case OMAP_DISPLAY_TYPE_VENC:
        case OMAP_DISPLAY_TYPE_HDMI:
                default_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &op->fifo_low,
                                &op->fifo_high);
                break;
#ifdef CONFIG_OMAP2_DSS_DSI
        case OMAP_DISPLAY_TYPE_DSI:
                dsi_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &op->fifo_low,
                                &op->fifo_high);
                break;
#endif
        default:
                BUG();
        }
}

int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
        int r;
        unsigned long flags;
        struct omap_overlay *ovl;

        DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

        r = dispc_runtime_get();
        if (r)
                return r;

        spin_lock_irqsave(&data_lock, flags);

        /* Configure overlays */
        list_for_each_entry(ovl, &mgr->overlays, list)
                omap_dss_mgr_apply_ovl(ovl);

        /* Configure manager */
        omap_dss_mgr_apply_mgr(mgr);

        /* Configure overlay fifos */
        list_for_each_entry(ovl, &mgr->overlays, list)
                omap_dss_mgr_apply_ovl_fifos(ovl);

        r = 0;
        if (mgr->enabled && !mgr_manual_update(mgr)) {
                if (!dss_data.irq_enabled)
                        dss_register_vsync_isr();

                dss_write_regs();
        }

        spin_unlock_irqrestore(&data_lock, flags);

        dispc_runtime_put();

        return r;
}

void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
        dispc_mgr_enable(mgr->id, true);
        mgr->enabled = true;
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
        dispc_mgr_enable(mgr->id, false);
        mgr->enabled = false;
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        mgr->info = *info;
        mgr->info_dirty = true;

        return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        *info = mgr->info;
}

int dss_mgr_set_device(struct omap_overlay_manager *mgr,
                struct omap_dss_device *dssdev)
{
        int r;

        if (dssdev->manager) {
                DSSERR("display '%s' already has a manager '%s'\n",
                        dssdev->name, dssdev->manager->name);
                return -EINVAL;
        }

        if ((mgr->supported_displays & dssdev->type) == 0) {
                DSSERR("display '%s' does not support manager '%s'\n",
                        dssdev->name, mgr->name);
                return -EINVAL;
        }

        dssdev->manager = mgr;
        mgr->device = dssdev;
        mgr->device_changed = true;

        return 0;
}

int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
        if (!mgr->device) {
                DSSERR("failed to unset display, display not set.\n");
                return -EINVAL;
        }

        /*
         * Don't allow currently enabled displays to have the overlay manager
         * pulled out from underneath them
         */
        if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED)
                return -EINVAL;

        mgr->device->manager = NULL;
        mgr->device = NULL;
        mgr->device_changed = true;

        return 0;
}

int dss_ovl_set_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        ovl->info = *info;
        ovl->info_dirty = true;

        return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        *info = ovl->info;
}

int dss_ovl_set_manager(struct omap_overlay *ovl,
                struct omap_overlay_manager *mgr)
{
        if (!mgr)
                return -EINVAL;

        if (ovl->manager) {
                DSSERR("overlay '%s' already has a manager '%s'\n",
                                ovl->name, ovl->manager->name);
                return -EINVAL;
        }

        if (ovl->info.enabled) {
                DSSERR("overlay has to be disabled to change the manager\n");
                return -EINVAL;
        }

        ovl->manager = mgr;
        list_add_tail(&ovl->list, &mgr->overlays);
        ovl->manager_changed = true;

        /* XXX: When there is an overlay on a DSI manual update display, and
         * the overlay is first disabled, then moved to tv, and enabled, we
         * seem to get SYNC_LOST_DIGIT error.
         *
         * Waiting doesn't seem to help, but updating the manual update display
         * after disabling the overlay seems to fix this. This hints that the
         * overlay is perhaps somehow tied to the LCD output until the output
         * is updated.
         *
         * Userspace workaround for this is to update the LCD after disabling
         * the overlay, but before moving the overlay to TV.
         */

        return 0;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
        if (!ovl->manager) {
                DSSERR("failed to detach overlay: manager not set\n");
                return -EINVAL;
        }

        if (ovl->info.enabled) {
                DSSERR("overlay has to be disabled to unset the manager\n");
                return -EINVAL;
        }

        ovl->manager = NULL;
        list_del(&ovl->list);
        ovl->manager_changed = true;

        return 0;
}