/* apply.c */
  1. /*
  2. * Copyright (C) 2011 Texas Instruments
  3. * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #define DSS_SUBSYS_NAME "APPLY"
  18. #include <linux/kernel.h>
  19. #include <linux/slab.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/jiffies.h>
  22. #include <video/omapdss.h>
  23. #include "dss.h"
  24. #include "dss_features.h"
  25. /*
  26. * We have 4 levels of cache for the dispc settings. First two are in SW and
  27. * the latter two in HW.
  28. *
  29. * +--------------------+
  30. * |overlay/manager_info|
  31. * +--------------------+
  32. * v
  33. * apply()
  34. * v
  35. * +--------------------+
  36. * | info |
  37. * +--------------------+
  38. * v
  39. * write_regs()
  40. * v
  41. * +--------------------+
  42. * | shadow registers |
  43. * +--------------------+
  44. * v
  45. * VFP or lcd/digit_enable
  46. * v
  47. * +--------------------+
  48. * | registers |
  49. * +--------------------+
  50. */
/* Per-overlay private state for the apply machinery. */
struct ovl_priv_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;
        /* latest overlay configuration accepted by apply() */
        struct omap_overlay_info info;
        /* DISPC output channel this overlay is routed to */
        enum omap_channel channel;
        /* FIFO thresholds computed in omap_dss_mgr_apply_ovl_fifos() */
        u32 fifo_low;
        u32 fifo_high;
        /* dirty flags for state outside 'info' (currently the enable bit),
         * mirroring the dirty/shadow_dirty pair above */
        bool extra_info_dirty;
        bool shadow_extra_info_dirty;
        /* enable state requested via dss_ovl_enable()/dss_ovl_disable() */
        bool enabled;
};
/* Per-manager private state for the apply machinery. */
struct mgr_priv_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;
        /* latest manager configuration accepted by apply() */
        struct omap_overlay_manager_info info;
        /* If true, GO bit is up and shadow registers cannot be written.
         * Never true for manual update displays */
        bool busy;
        /* If true, dispc output is enabled */
        bool updating;
        /* If true, a display is enabled using this manager */
        bool enabled;
};
/* All SW-cached apply state, indexed by overlay/manager id. */
static struct {
        struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
        struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
        /* true while dss_apply_irq_handler is registered */
        bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);

static void dss_register_vsync_isr(void);
  94. static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
  95. {
  96. return &dss_data.ovl_priv_data_array[ovl->id];
  97. }
  98. static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
  99. {
  100. return &dss_data.mgr_priv_data_array[mgr->id];
  101. }
/* One-time init: prepare the spinlock that guards dss_data. */
void dss_apply_init(void)
{
        spin_lock_init(&data_lock);
}
/* True if the display behind this overlay is a manual-update display.
 * NOTE(review): dereferences ovl->manager->device unchecked — callers are
 * expected to guarantee both are set. */
static bool ovl_manual_update(struct omap_overlay *ovl)
{
        return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
/* True if the display on this manager is a manual-update display.
 * NOTE(review): dereferences mgr->device unchecked — callers must ensure a
 * device is attached. */
static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
        return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
/*
 * Decide whether the vsync/framedone ISR still has pending work and must
 * stay registered.  Called with data_lock held (all callers visible in this
 * file hold it).  Returns true if any enabled manager is mid-update
 * (manual-update case), has its GO bit busy, or has dirty manager/overlay
 * state still to be flushed to registers.
 */
static bool need_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;

        for (i = 0; i < num_mgrs; ++i) {
                struct omap_overlay_manager *mgr;
                struct mgr_priv_data *mp;
                struct omap_overlay *ovl;

                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->enabled)
                        continue;

                if (mgr_manual_update(mgr)) {
                        /* to catch FRAMEDONE */
                        if (mp->updating)
                                return true;
                } else {
                        /* to catch GO bit going down */
                        if (mp->busy)
                                return true;

                        /* to write new values to registers */
                        if (mp->dirty)
                                return true;

                        list_for_each_entry(ovl, &mgr->overlays, list) {
                                struct ovl_priv_data *op;

                                op = get_ovl_priv(ovl);

                                if (!op->enabled)
                                        continue;

                                /* to write new values to registers */
                                if (op->dirty || op->extra_info_dirty)
                                        return true;
                        }
                }
        }

        return false;
}
  150. static bool need_go(struct omap_overlay_manager *mgr)
  151. {
  152. struct omap_overlay *ovl;
  153. struct mgr_priv_data *mp;
  154. struct ovl_priv_data *op;
  155. mp = get_mgr_priv(mgr);
  156. if (mp->shadow_dirty)
  157. return true;
  158. list_for_each_entry(ovl, &mgr->overlays, list) {
  159. op = get_ovl_priv(ovl);
  160. if (op->shadow_dirty || op->shadow_extra_info_dirty)
  161. return true;
  162. }
  163. return false;
  164. }
/*
 * Block until this manager's pending configuration has been taken into use
 * (dirty and shadow_dirty both clear), waiting on VSYNC IRQs.
 *
 * Returns 0 on success or when there is nothing to wait for (no active
 * display, manual-update display, or the 4-iteration safety limit fires),
 * -ERESTARTSYS if interrupted, or the timeout error from the IRQ wait.
 */
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct mgr_priv_data *mp;
        u32 irq;
        int r;
        int i;
        struct omap_dss_device *dssdev = mgr->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (mgr_manual_update(mgr))
                return 0;

        irq = dispc_mgr_get_vsync_irq(mgr->id);

        mp = get_mgr_priv(mgr);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                /* snapshot the flags under the lock; the ISR updates them */
                spin_lock_irqsave(&data_lock, flags);
                dirty = mp->dirty;
                shadow_dirty = mp->shadow_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("mgr(%d)->wait_for_go() not finishing\n",
                                        mgr->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
                        break;
                }
        }

        return r;
}
/*
 * Same as dss_mgr_wait_for_go(), but waits on a single overlay's dirty and
 * shadow_dirty flags instead of the manager's.  Returns 0 when there is
 * nothing to wait for (no manager/active display, manual-update display,
 * or the iteration safety limit fires), otherwise the IRQ-wait error.
 */
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct ovl_priv_data *op;
        struct omap_dss_device *dssdev;
        u32 irq;
        int r;
        int i;

        if (!ovl->manager)
                return 0;

        dssdev = ovl->manager->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (ovl_manual_update(ovl))
                return 0;

        irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

        op = get_ovl_priv(ovl);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                /* snapshot the flags under the lock; the ISR updates them */
                spin_lock_irqsave(&data_lock, flags);
                dirty = op->dirty;
                shadow_dirty = op->shadow_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("ovl(%d)->wait_for_go() not finishing\n",
                                        ovl->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
                        break;
                }
        }

        return r;
}
/*
 * Flush a dirty overlay's cached info to the DISPC (shadow) registers.
 * No-op if the overlay is disabled or not dirty.  On a dispc_ovl_setup()
 * failure the overlay is force-disabled, since we may be running from the
 * vsync interrupt and cannot propagate the error.
 */
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        struct omap_overlay_info *oi;
        bool ilace, replication;
        struct mgr_priv_data *mp;
        int r;

        DSSDBGF("%d", ovl->id);

        if (!op->enabled || !op->dirty)
                return;

        oi = &op->info;

        replication = dss_use_replication(ovl->manager->device, oi->color_mode);

        /* interlacing is needed only for VENC (TV) output */
        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

        dispc_ovl_set_channel_out(ovl->id, op->channel);

        r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
        if (r) {
                /*
                 * We can't do much here, as this function can be called from
                 * vsync interrupt.
                 */
                DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

                /* This will leave fifo configurations in a nonoptimal state */
                op->enabled = false;
                dispc_ovl_enable(ovl->id, false);
                return;
        }

        dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

        mp = get_mgr_priv(ovl->manager);

        op->dirty = false;
        /* if the output is running, the write landed in shadow registers
         * and must be tracked until the next VSYNC latches it */
        if (mp->updating)
                op->shadow_dirty = true;
}
/*
 * Flush the overlay's "extra" state (currently only the enable bit) to the
 * DISPC registers when it is dirty.
 */
static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        struct mgr_priv_data *mp;

        DSSDBGF("%d", ovl->id);

        if (!op->extra_info_dirty)
                return;

        /* note: write also when op->enabled == false, so that the ovl gets
         * disabled */
        dispc_ovl_enable(ovl->id, op->enabled);

        mp = get_mgr_priv(ovl->manager);

        op->extra_info_dirty = false;
        /* while the output runs, the value sits in shadow registers until
         * the next VSYNC */
        if (mp->updating)
                op->shadow_extra_info_dirty = true;
}
/*
 * Flush a manager and all its overlays to the DISPC registers.  Must not be
 * called while the manager's GO bit is up (WARN_ON below); no-op when the
 * manager is not enabled.
 */
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        struct omap_overlay *ovl;

        DSSDBGF("%d", mgr->id);

        if (!mp->enabled)
                return;

        WARN_ON(mp->busy);

        /* Commit overlay settings */
        list_for_each_entry(ovl, &mgr->overlays, list) {
                dss_ovl_write_regs(ovl);
                dss_ovl_write_regs_extra(ovl);
        }

        if (mp->dirty) {
                dispc_mgr_setup(mgr->id, &mp->info);

                mp->dirty = false;
                /* track the shadow-register copy until VSYNC latches it */
                if (mp->updating)
                        mp->shadow_dirty = true;
        }
}
/*
 * Flush all auto-update managers that are enabled and not busy, then raise
 * GO where shadow registers hold new data.  Manual-update managers are
 * flushed from dss_mgr_start_update() instead.  Called with data_lock held.
 *
 * NOTE(review): this loop uses omap_dss_get_num_overlay_managers() while
 * need_isr()/dss_register_vsync_isr() use dss_feat_get_num_mgrs() — confirm
 * the two counts are equivalent here.
 */
static void dss_write_regs(void)
{
        const int num_mgrs = omap_dss_get_num_overlay_managers();
        int i;

        for (i = 0; i < num_mgrs; ++i) {
                struct omap_overlay_manager *mgr;
                struct mgr_priv_data *mp;

                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
                        continue;

                dss_mgr_write_regs(mgr);

                if (need_go(mgr)) {
                        mp->busy = true;

                        /* the ISR is needed to see the GO bit go down */
                        if (!dss_data.irq_enabled && need_isr())
                                dss_register_vsync_isr();

                        dispc_mgr_go(mgr->id);
                }
        }
}
/*
 * Kick one frame on a manual-update manager: flush pending state to the
 * registers, mark the manager as updating and enable the DISPC output.
 */
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        /* a new update must not start while one is in flight */
        WARN_ON(mp->updating);

        dss_mgr_write_regs(mgr);

        mp->updating = true;

        /* the ISR catches FRAMEDONE to clear 'updating' state */
        if (!dss_data.irq_enabled && need_isr())
                dss_register_vsync_isr();

        dispc_mgr_enable(mgr->id, true);

        spin_unlock_irqrestore(&data_lock, flags);
}
  362. static void dss_apply_irq_handler(void *data, u32 mask);
  363. static void dss_register_vsync_isr(void)
  364. {
  365. const int num_mgrs = dss_feat_get_num_mgrs();
  366. u32 mask;
  367. int r, i;
  368. mask = 0;
  369. for (i = 0; i < num_mgrs; ++i)
  370. mask |= dispc_mgr_get_vsync_irq(i);
  371. for (i = 0; i < num_mgrs; ++i)
  372. mask |= dispc_mgr_get_framedone_irq(i);
  373. r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
  374. WARN_ON(r);
  375. dss_data.irq_enabled = true;
  376. }
  377. static void dss_unregister_vsync_isr(void)
  378. {
  379. const int num_mgrs = dss_feat_get_num_mgrs();
  380. u32 mask;
  381. int r, i;
  382. mask = 0;
  383. for (i = 0; i < num_mgrs; ++i)
  384. mask |= dispc_mgr_get_vsync_irq(i);
  385. for (i = 0; i < num_mgrs; ++i)
  386. mask |= dispc_mgr_get_framedone_irq(i);
  387. r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
  388. WARN_ON(r);
  389. dss_data.irq_enabled = false;
  390. }
/*
 * VSYNC/FRAMEDONE interrupt handler.  Re-reads the HW GO/enable state into
 * the SW cache, clears shadow-dirty flags for managers whose GO has gone
 * down, flushes any remaining dirty state, and unregisters itself once no
 * more work is pending.  Runs in interrupt context under data_lock.
 */
static void dss_apply_irq_handler(void *data, u32 mask)
{
        struct omap_overlay *ovl;
        struct omap_overlay_manager *mgr;
        struct mgr_priv_data *mp;
        struct ovl_priv_data *op;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;

        spin_lock(&data_lock);

        /* sync SW view of busy/updating with the HW */
        for (i = 0; i < num_mgrs; i++) {
                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                mp->busy = dispc_mgr_go_busy(i);
                mp->updating = dispc_mgr_is_enabled(i);
        }

        /* overlays on a manager whose GO is down have been latched */
        for (i = 0; i < num_ovls; ++i) {
                ovl = omap_dss_get_overlay(i);
                op = get_ovl_priv(ovl);

                if (!op->enabled)
                        continue;

                mp = get_mgr_priv(ovl->manager);

                if (!mp->busy) {
                        op->shadow_dirty = false;
                        op->shadow_extra_info_dirty = false;
                }
        }

        for (i = 0; i < num_mgrs; ++i) {
                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->busy)
                        mp->shadow_dirty = false;
        }

        /* push any pending dirty state and raise GO again if needed */
        dss_write_regs();

        if (!need_isr())
                dss_unregister_vsync_isr();

        spin_unlock(&data_lock);
}
  429. static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
  430. {
  431. struct ovl_priv_data *op;
  432. op = get_ovl_priv(ovl);
  433. if (ovl->manager_changed) {
  434. ovl->manager_changed = false;
  435. ovl->info_dirty = true;
  436. }
  437. if (!ovl->info_dirty)
  438. return;
  439. ovl->info_dirty = false;
  440. op->dirty = true;
  441. op->info = ovl->info;
  442. op->channel = ovl->manager->id;
  443. }
  444. static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
  445. {
  446. struct mgr_priv_data *mp;
  447. mp = get_mgr_priv(mgr);
  448. if (mgr->device_changed) {
  449. mgr->device_changed = false;
  450. mgr->info_dirty = true;
  451. }
  452. if (!mgr->info_dirty)
  453. return;
  454. mgr->info_dirty = false;
  455. mp->dirty = true;
  456. mp->info = mgr->info;
  457. }
/*
 * Compute the FIFO low/high thresholds for an overlay based on its FIFO
 * size, burst size and the attached display type, storing them in op for
 * dss_ovl_write_regs() to program.
 */
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op;
        struct omap_dss_device *dssdev;
        u32 size, burst_size;

        op = get_ovl_priv(ovl);
        dssdev = ovl->manager->device;

        size = dispc_ovl_get_fifo_size(ovl->id);
        burst_size = dispc_ovl_get_burst_size(ovl->id);

        switch (dssdev->type) {
        case OMAP_DISPLAY_TYPE_DPI:
        case OMAP_DISPLAY_TYPE_DBI:
        case OMAP_DISPLAY_TYPE_SDI:
        case OMAP_DISPLAY_TYPE_VENC:
        case OMAP_DISPLAY_TYPE_HDMI:
                default_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &op->fifo_low,
                                &op->fifo_high);
                break;
#ifdef CONFIG_OMAP2_DSS_DSI
        case OMAP_DISPLAY_TYPE_DSI:
                /* DSI (manual-update) displays use their own thresholds */
                dsi_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &op->fifo_low,
                                &op->fifo_high);
                break;
#endif
        default:
                /* unknown display type: cannot pick thresholds */
                BUG();
        }
}
/*
 * Apply entry point: latch the user-visible configuration of the manager
 * and all its overlays into the private copies, recompute overlay FIFO
 * thresholds, and flush everything to the registers.  Returns 0 on
 * success or the dispc_runtime_get() error.
 */
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
        int r;
        unsigned long flags;
        struct omap_overlay *ovl;

        DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

        /* DISPC must be powered to write its registers */
        r = dispc_runtime_get();
        if (r)
                return r;

        spin_lock_irqsave(&data_lock, flags);

        /* Configure overlays */
        list_for_each_entry(ovl, &mgr->overlays, list)
                omap_dss_mgr_apply_ovl(ovl);

        /* Configure manager */
        omap_dss_mgr_apply_mgr(mgr);

        /* Configure overlay fifos */
        list_for_each_entry(ovl, &mgr->overlays, list)
                omap_dss_mgr_apply_ovl_fifos(ovl);

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        dispc_runtime_put();

        /* r is 0 here (dispc_runtime_get succeeded) */
        return r;
}
/*
 * Enable a manager: mark it enabled, flush pending state, and for
 * auto-update displays also enable the DISPC output (done outside the
 * spinlock, as dispc_mgr_enable() may not be IRQ-safe here).
 */
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        mutex_lock(&apply_lock);

        spin_lock_irqsave(&data_lock, flags);

        mp->enabled = true;

        dss_write_regs();

        /* manual-update managers only become 'updating' per frame, via
         * dss_mgr_start_update() */
        if (!mgr_manual_update(mgr))
                mp->updating = true;

        spin_unlock_irqrestore(&data_lock, flags);

        if (!mgr_manual_update(mgr))
                dispc_mgr_enable(mgr->id, true);

        mutex_unlock(&apply_lock);
}
/*
 * Disable a manager: stop the DISPC output first (auto-update only), then
 * clear the updating/enabled state under the lock.
 */
void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        mutex_lock(&apply_lock);

        if (!mgr_manual_update(mgr))
                dispc_mgr_enable(mgr->id, false);

        spin_lock_irqsave(&data_lock, flags);

        mp->updating = false;
        mp->enabled = false;

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);
}
/*
 * Store a new user configuration for the manager and mark it dirty.  The
 * change takes effect on the next omap_dss_mgr_apply().  Always returns 0.
 */
int dss_mgr_set_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        mgr->info = *info;
        mgr->info_dirty = true;

        spin_unlock_irqrestore(&data_lock, flags);

        return 0;
}
/* Copy the manager's current user configuration into *info, atomically. */
void dss_mgr_get_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        *info = mgr->info;

        spin_unlock_irqrestore(&data_lock, flags);
}
  557. int dss_mgr_set_device(struct omap_overlay_manager *mgr,
  558. struct omap_dss_device *dssdev)
  559. {
  560. int r;
  561. mutex_lock(&apply_lock);
  562. if (dssdev->manager) {
  563. DSSERR("display '%s' already has a manager '%s'\n",
  564. dssdev->name, dssdev->manager->name);
  565. r = -EINVAL;
  566. goto err;
  567. }
  568. if ((mgr->supported_displays & dssdev->type) == 0) {
  569. DSSERR("display '%s' does not support manager '%s'\n",
  570. dssdev->name, mgr->name);
  571. r = -EINVAL;
  572. goto err;
  573. }
  574. dssdev->manager = mgr;
  575. mgr->device = dssdev;
  576. mgr->device_changed = true;
  577. mutex_unlock(&apply_lock);
  578. return 0;
  579. err:
  580. mutex_unlock(&apply_lock);
  581. return r;
  582. }
/*
 * Detach the display device from this manager.  Fails with -EINVAL if no
 * device is set or the display is not disabled.
 */
int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
        int r;

        mutex_lock(&apply_lock);

        if (!mgr->device) {
                DSSERR("failed to unset display, display not set.\n");
                r = -EINVAL;
                goto err;
        }

        /*
         * Don't allow currently enabled displays to have the overlay manager
         * pulled out from underneath them
         */
        if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
                r = -EINVAL;
                goto err;
        }

        mgr->device->manager = NULL;
        mgr->device = NULL;
        mgr->device_changed = true;

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}
/*
 * Store a new user configuration for the overlay and mark it dirty.  The
 * change takes effect on the next omap_dss_mgr_apply().  Always returns 0.
 */
int dss_ovl_set_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        ovl->info = *info;
        ovl->info_dirty = true;

        spin_unlock_irqrestore(&data_lock, flags);

        return 0;
}
/* Copy the overlay's current user configuration into *info, atomically. */
void dss_ovl_get_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        *info = ovl->info;

        spin_unlock_irqrestore(&data_lock, flags);
}
/*
 * Attach an overlay to a manager.  Fails with -EINVAL if mgr is NULL, the
 * overlay already has a manager, or the overlay is currently enabled.
 */
int dss_ovl_set_manager(struct omap_overlay *ovl,
                struct omap_overlay_manager *mgr)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        if (!mgr)
                return -EINVAL;

        mutex_lock(&apply_lock);

        if (ovl->manager) {
                DSSERR("overlay '%s' already has a manager '%s'\n",
                                ovl->name, ovl->manager->name);
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        if (op->enabled) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("overlay has to be disabled to change the manager\n");
                r = -EINVAL;
                goto err;
        }

        ovl->manager = mgr;
        list_add_tail(&ovl->list, &mgr->overlays);
        ovl->manager_changed = true;

        spin_unlock_irqrestore(&data_lock, flags);

        /* XXX: When there is an overlay on a DSI manual update display, and
         * the overlay is first disabled, then moved to tv, and enabled, we
         * seem to get SYNC_LOST_DIGIT error.
         *
         * Waiting doesn't seem to help, but updating the manual update display
         * after disabling the overlay seems to fix this. This hints that the
         * overlay is perhaps somehow tied to the LCD output until the output
         * is updated.
         *
         * Userspace workaround for this is to update the LCD after disabling
         * the overlay, but before moving the overlay to TV.
         */

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}
/*
 * Detach an overlay from its manager.  Fails with -EINVAL if no manager is
 * set or the overlay is currently enabled.
 */
int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (!ovl->manager) {
                DSSERR("failed to detach overlay: manager not set\n");
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        if (op->enabled) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("overlay has to be disabled to unset the manager\n");
                r = -EINVAL;
                goto err;
        }

        ovl->manager = NULL;
        list_del(&ovl->list);
        ovl->manager_changed = true;

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}
  699. bool dss_ovl_is_enabled(struct omap_overlay *ovl)
  700. {
  701. struct ovl_priv_data *op = get_ovl_priv(ovl);
  702. unsigned long flags;
  703. bool e;
  704. spin_lock_irqsave(&data_lock, flags);
  705. e = op->enabled;
  706. spin_unlock_irqrestore(&data_lock, flags);
  707. return e;
  708. }
/*
 * Enable an overlay and flush the change to the hardware.  Fails with
 * -EINVAL when the overlay has no manager or the manager has no display.
 */
int dss_ovl_enable(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (ovl->manager == NULL || ovl->manager->device == NULL) {
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        op->enabled = true;
        /* the enable bit lives in the 'extra' state, not in op->info */
        op->extra_info_dirty = true;

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}
/*
 * Disable an overlay and flush the change to the hardware.  Fails with
 * -EINVAL when the overlay has no manager or the manager has no display.
 */
int dss_ovl_disable(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (ovl->manager == NULL || ovl->manager->device == NULL) {
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        op->enabled = false;
        /* the enable bit lives in the 'extra' state, not in op->info */
        op->extra_info_dirty = true;

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}