/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *          v
 *     write_regs()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */

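/*
 * A minimal sketch of how a caller is expected to drive this pipeline for an
 * auto-update display (illustrative only, not part of this file; assumes the
 * caller already holds valid overlay/manager pointers):
 */
#if 0
static int example_flip(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr,
		struct omap_overlay_info *new_info)
{
	int r;

	/* 1. stage the new overlay settings (first SW level) */
	r = dss_ovl_set_info(ovl, new_info);
	if (r)
		return r;

	/* 2. copy them to the private cache and write the shadow registers */
	r = omap_dss_mgr_apply(mgr);
	if (r)
		return r;

	/* 3. block until the hardware has taken the new values into use */
	return dss_mgr_wait_for_go(mgr);
}
#endif
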
struct ovl_priv_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_info info;

	enum omap_channel channel;

	u32 fifo_low;
	u32 fifo_high;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	bool enabled;
};

struct mgr_priv_data {
	bool user_info_dirty;
	struct omap_overlay_manager_info user_info;

	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_manager_info info;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;
};

static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);

static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

void dss_apply_init(void)
{
	spin_lock_init(&data_lock);
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

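/*
 * Tell whether the vsync/framedone ISR still has work to do: for manual
 * update managers it is needed to catch FRAMEDONE, for auto-update managers
 * to see the GO bit go down and to write dirty overlay/manager state out to
 * the registers.
 */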
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->dirty || op->extra_info_dirty)
					return true;
			}
		}
	}

	return false;
}

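/*
 * A GO must be requested if the manager itself, or any overlay assigned to
 * it, still has values sitting in the shadow registers that the hardware has
 * not yet taken into use.
 */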
static bool need_go(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);

	if (mp->shadow_dirty)
		return true;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		if (op->shadow_dirty || op->shadow_extra_info_dirty)
			return true;
	}

	return false;
}

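/*
 * dss_mgr_wait_for_go() and its overlay counterpart below block until the
 * applied configuration has reached the hardware, i.e. until both the dirty
 * and shadow_dirty flags have been cleared. The wait is bounded by the
 * iteration limit and the per-wait timeout, so a stuck GO bit cannot hang
 * the caller indefinitely.
 */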
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (mgr_manual_update(mgr))
		return 0;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	mp = get_mgr_priv(mgr);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->dirty;
		shadow_dirty = mp->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (ovl_manual_update(ovl))
		return 0;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = op->dirty;
		shadow_dirty = op->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}

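/*
 * Push a dirty overlay configuration from the private cache into the dispc
 * (shadow) registers. On a setup failure the overlay is simply disabled,
 * since this can run from vsync context and little else can be done there.
 */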
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool ilace, replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBGF("%d", ovl->id);

	if (!op->enabled || !op->dirty)
		return;

	oi = &op->info;

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(ovl->id, op->channel);

	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->dirty = false;

	if (mp->updating)
		op->shadow_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBGF("%d", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */

	dispc_ovl_enable(ovl->id, op->enabled);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;

	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}

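/*
 * Write a manager's pending configuration, and that of every overlay routed
 * to it, into the dispc registers. Called with data_lock held and with the
 * manager not busy (GO bit down), hence the WARN_ON below.
 */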
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBGF("%d", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);

		mp->dirty = false;

		if (mp->updating)
			mp->shadow_dirty = true;
	}
}

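/*
 * Flush pending state for every enabled, non-busy auto-update manager and
 * set the GO bit where needed. The vsync/framedone ISR is registered lazily
 * here, so it only runs while there is shadow state left to track.
 */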
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		dss_mgr_write_regs(mgr);

		if (need_go(mgr)) {
			mp->busy = true;

			if (!dss_data.irq_enabled && need_isr())
				dss_register_vsync_isr();

			dispc_mgr_go(mgr->id);
		}
	}
}

void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	dss_mgr_write_regs(mgr);

	mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable(mgr->id, true);

	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = false;
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);
	mp->shadow_dirty = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		op->shadow_dirty = false;
		op->shadow_extra_info_dirty = false;
	}
}

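/*
 * VSYNC/FRAMEDONE handler: sample the GO/enable state of each manager,
 * clear shadow_dirty once the hardware has taken the values into use,
 * write out any newly dirtied state, and drop the ISR when nothing is
 * left to do.
 */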
static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			mp->busy = dispc_mgr_go_busy(i);

			if (!mp->busy)
				mgr_clear_shadow_dirty(mgr);
		} else {
			if (!mp->updating)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}

static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (ovl->manager_changed) {
		ovl->manager_changed = false;
		ovl->info_dirty = true;
	}

	if (!ovl->info_dirty)
		return;

	ovl->info_dirty = false;
	op->dirty = true;
	op->info = ovl->info;

	op->channel = ovl->manager->id;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (mgr->device_changed) {
		mgr->device_changed = false;
		mp->user_info_dirty = true;
	}

	if (!mp->user_info_dirty)
		return;

	mp->user_info_dirty = false;
	mp->dirty = true;
	mp->info = mp->user_info;
}

static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	op = get_ovl_priv(ovl);
	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#endif
	default:
		BUG();
	}
}

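/*
 * Snapshot the user-visible overlay and manager settings into the private
 * caches, recompute the overlay FIFO thresholds, and then attempt to write
 * everything out. All of this happens under data_lock, so an apply is
 * atomic with respect to the vsync ISR.
 */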
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl_fifos(ovl);

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	return r;
}

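/*
 * The blocking entry points below (enable/disable, set/unset device and
 * manager) take apply_lock to serialize against each other, taking data_lock
 * inside it where they touch state shared with the vsync ISR; the plain
 * set_info/get_info helpers only need data_lock.
 */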
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	spin_lock_irqsave(&data_lock, flags);

	mp->enabled = true;

	dss_write_regs();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	spin_unlock_irqrestore(&data_lock, flags);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, true);

	mutex_unlock(&apply_lock);
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, false);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_mgr_set_device(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev)
{
	int r;

	mutex_lock(&apply_lock);

	if (dssdev->manager) {
		DSSERR("display '%s' already has a manager '%s'\n",
			dssdev->name, dssdev->manager->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_displays & dssdev->type) == 0) {
		DSSERR("display '%s' does not support manager '%s'\n",
			dssdev->name, mgr->name);
		r = -EINVAL;
		goto err;
	}

	dssdev->manager = mgr;
	mgr->device = dssdev;
	mgr->device_changed = true;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
	int r;

	mutex_lock(&apply_lock);

	if (!mgr->device) {
		DSSERR("failed to unset display, display not set.\n");
		r = -EINVAL;
		goto err;
	}

	/*
	 * Don't allow currently enabled displays to have the overlay manager
	 * pulled out from underneath them
	 */
	if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
		r = -EINVAL;
		goto err;
	}

	mgr->device->manager = NULL;
	mgr->device = NULL;
	mgr->device_changed = true;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_set_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	ovl->info = *info;
	ovl->info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = ovl->info;

	spin_unlock_irqrestore(&data_lock, flags);
}

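/*
 * Attach an overlay to a manager. The overlay must currently be unassigned
 * and disabled; the new routing only reaches the hardware on the next
 * omap_dss_mgr_apply(), via the manager_changed flag.
 */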
int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
				ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);
	ovl->manager_changed = true;

	spin_unlock_irqrestore(&data_lock, flags);

	/* XXX: When there is an overlay on a DSI manual update display, and
	 * the overlay is first disabled, then moved to tv, and enabled, we
	 * seem to get SYNC_LOST_DIGIT error.
	 *
	 * Waiting doesn't seem to help, but updating the manual update display
	 * after disabling the overlay seems to fix this. This hints that the
	 * overlay is perhaps somehow tied to the LCD output until the output
	 * is updated.
	 *
	 * Userspace workaround for this is to update the LCD after disabling
	 * the overlay, but before moving the overlay to TV.
	 */

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = NULL;
	list_del(&ovl->list);
	ovl->manager_changed = true;

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	bool e;

	spin_lock_irqsave(&data_lock, flags);

	e = op->enabled;

	spin_unlock_irqrestore(&data_lock, flags);

	return e;
}

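/*
 * Enabling or disabling an overlay only flips the cached enable state and
 * marks the extra info dirty; dss_write_regs() then propagates it through
 * dss_ovl_write_regs_extra(). Both calls require the overlay to be bound to
 * a manager that has a display attached.
 */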
int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = true;
	op->extra_info_dirty = true;

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_disable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = false;
	op->extra_info_dirty = true;

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}