apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *       set_info()
 *          v
 * +--------------------+
 * |     user_info      |
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |       info         |
 * +--------------------+
 *          v
 *      write_regs()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */

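/*
 * In short: dss_ovl_set_info()/dss_mgr_set_info() only touch user_info.
 * omap_dss_mgr_apply() copies user_info to info, and dss_write_regs()
 * programs info into the dispc shadow registers. The HW takes the shadow
 * registers into use at the VFP (auto update displays) or when the lcd/digit
 * output is enabled (manual update displays).
 */
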
struct ovl_priv_data {
        bool user_info_dirty;
        struct omap_overlay_info user_info;

        bool info_dirty;
        struct omap_overlay_info info;

        bool shadow_info_dirty;

        bool extra_info_dirty;
        bool shadow_extra_info_dirty;

        bool enabled;
        enum omap_channel channel;
        u32 fifo_low, fifo_high;
};

struct mgr_priv_data {
        bool user_info_dirty;
        struct omap_overlay_manager_info user_info;

        bool info_dirty;
        struct omap_overlay_manager_info info;

        bool shadow_info_dirty;

        /* If true, GO bit is up and shadow registers cannot be written.
         * Never true for manual update displays */
        bool busy;

        /* If true, dispc output is enabled */
        bool updating;

        /* If true, a display is enabled using this manager */
        bool enabled;
};

static struct {
        struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
        struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

        bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);

static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
        return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
        return &dss_data.mgr_priv_data_array[mgr->id];
}

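/*
 * Initialize the private data with sane defaults: full global alpha and, on
 * HW with free zorder, a distinct default zorder per overlay.
 */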
void dss_apply_init(void)
{
        const int num_ovls = dss_feat_get_num_ovls();
        int i;

        spin_lock_init(&data_lock);

        for (i = 0; i < num_ovls; ++i) {
                struct ovl_priv_data *op;

                op = &dss_data.ovl_priv_data_array[i];

                op->info.global_alpha = 255;

                switch (i) {
                case 0:
                        op->info.zorder = 0;
                        break;
                case 1:
                        op->info.zorder =
                                dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
                        break;
                case 2:
                        op->info.zorder =
                                dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
                        break;
                case 3:
                        op->info.zorder =
                                dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
                        break;
                }

                op->user_info = op->info;
        }
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
        return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
        return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

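/*
 * Returns true if the vsync/framedone ISR still has work to do: an ongoing
 * manual update, a pending GO bit, or dirty infos waiting to be written.
 */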
static bool need_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;

        for (i = 0; i < num_mgrs; ++i) {
                struct omap_overlay_manager *mgr;
                struct mgr_priv_data *mp;
                struct omap_overlay *ovl;

                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->enabled)
                        continue;

                if (mgr_manual_update(mgr)) {
                        /* to catch FRAMEDONE */
                        if (mp->updating)
                                return true;
                } else {
                        /* to catch GO bit going down */
                        if (mp->busy)
                                return true;

                        /* to write new values to registers */
                        if (mp->info_dirty)
                                return true;

                        list_for_each_entry(ovl, &mgr->overlays, list) {
                                struct ovl_priv_data *op;

                                op = get_ovl_priv(ovl);

                                if (!op->enabled)
                                        continue;

                                /* to write new values to registers */
                                if (op->info_dirty || op->extra_info_dirty)
                                        return true;
                        }
                }
        }

        return false;
}

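/* Returns true if any shadow register of the manager or its overlays is
 * dirty, i.e. GO needs to be set so that the HW takes the new values into
 * use. */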
static bool need_go(struct omap_overlay_manager *mgr)
{
        struct omap_overlay *ovl;
        struct mgr_priv_data *mp;
        struct ovl_priv_data *op;

        mp = get_mgr_priv(mgr);

        if (mp->shadow_info_dirty)
                return true;

        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);
                if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
                        return true;
        }

        return false;
}

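/*
 * Wait until the manager's dirty and shadow-dirty flags have been cleared,
 * i.e. until the new configuration has been taken into use by the HW. Gives
 * up after a few vsyncs instead of blocking indefinitely.
 */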
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct mgr_priv_data *mp;
        u32 irq;
        int r;
        int i;
        struct omap_dss_device *dssdev = mgr->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (mgr_manual_update(mgr))
                return 0;

        irq = dispc_mgr_get_vsync_irq(mgr->id);

        mp = get_mgr_priv(mgr);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&data_lock, flags);
                dirty = mp->info_dirty;
                shadow_dirty = mp->shadow_info_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("mgr(%d)->wait_for_go() not finishing\n",
                                        mgr->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
                        break;
                }
        }

        return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct ovl_priv_data *op;
        struct omap_dss_device *dssdev;
        u32 irq;
        int r;
        int i;

        if (!ovl->manager)
                return 0;

        dssdev = ovl->manager->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (ovl_manual_update(ovl))
                return 0;

        irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

        op = get_ovl_priv(ovl);
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&data_lock, flags);
                dirty = op->info_dirty;
                shadow_dirty = op->shadow_info_dirty;
                spin_unlock_irqrestore(&data_lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("ovl(%d)->wait_for_go() not finishing\n",
                                        ovl->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
                        break;
                }
        }

        return r;
}

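/* Write the overlay's dirty info to the dispc shadow registers. May run in
 * vsync interrupt context, so errors can only be reported, not propagated. */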
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        struct omap_overlay_info *oi;
        bool ilace, replication;
        struct mgr_priv_data *mp;
        int r;

        DSSDBGF("%d", ovl->id);

        if (!op->enabled || !op->info_dirty)
                return;

        oi = &op->info;

        replication = dss_use_replication(ovl->manager->device, oi->color_mode);

        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

        r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
        if (r) {
                /*
                 * We can't do much here, as this function can be called from
                 * vsync interrupt.
                 */
                DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

                /* This will leave fifo configurations in a nonoptimal state */
                op->enabled = false;
                dispc_ovl_enable(ovl->id, false);
                return;
        }

        mp = get_mgr_priv(ovl->manager);

        op->info_dirty = false;
        if (mp->updating)
                op->shadow_info_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        struct mgr_priv_data *mp;

        DSSDBGF("%d", ovl->id);

        if (!op->extra_info_dirty)
                return;

        /* note: write also when op->enabled == false, so that the ovl gets
         * disabled */

        dispc_ovl_enable(ovl->id, op->enabled);
        dispc_ovl_set_channel_out(ovl->id, op->channel);
        dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

        mp = get_mgr_priv(ovl->manager);

        op->extra_info_dirty = false;
        if (mp->updating)
                op->shadow_extra_info_dirty = true;
}

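/* Write the dirty manager and overlay infos of this manager to the dispc
 * shadow registers. Callers must not have a GO pending (WARN_ON(mp->busy)). */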
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        struct omap_overlay *ovl;

        DSSDBGF("%d", mgr->id);

        if (!mp->enabled)
                return;

        WARN_ON(mp->busy);

        /* Commit overlay settings */
        list_for_each_entry(ovl, &mgr->overlays, list) {
                dss_ovl_write_regs(ovl);
                dss_ovl_write_regs_extra(ovl);
        }

        if (mp->info_dirty) {
                dispc_mgr_setup(mgr->id, &mp->info);

                mp->info_dirty = false;
                if (mp->updating)
                        mp->shadow_info_dirty = true;
        }
}

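/*
 * Write all pending configurations to the HW and set GO where needed.
 * Called with data_lock held, from the apply/enable paths and from the ISR.
 */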
static void dss_write_regs(void)
{
        const int num_mgrs = omap_dss_get_num_overlay_managers();
        int i;

        for (i = 0; i < num_mgrs; ++i) {
                struct omap_overlay_manager *mgr;
                struct mgr_priv_data *mp;

                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
                        continue;

                dss_mgr_write_regs(mgr);

                if (need_go(mgr)) {
                        mp->busy = true;

                        if (!dss_data.irq_enabled && need_isr())
                                dss_register_vsync_isr();

                        dispc_mgr_go(mgr->id);
                }
        }
}

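/* Presumably called for manual update displays: write the pending
 * configuration and enable the dispc output to push out a frame. */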
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        WARN_ON(mp->updating);

        dss_mgr_write_regs(mgr);

        mp->updating = true;

        if (!dss_data.irq_enabled && need_isr())
                dss_register_vsync_isr();

        dispc_mgr_enable(mgr->id, true);

        spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        u32 mask;
        int r, i;

        mask = 0;
        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_vsync_irq(i);

        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_framedone_irq(i);

        r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
        WARN_ON(r);

        dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        u32 mask;
        int r, i;

        mask = 0;
        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_vsync_irq(i);

        for (i = 0; i < num_mgrs; ++i)
                mask |= dispc_mgr_get_framedone_irq(i);

        r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
        WARN_ON(r);

        dss_data.irq_enabled = false;
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
        struct omap_overlay *ovl;
        struct mgr_priv_data *mp;
        struct ovl_priv_data *op;

        mp = get_mgr_priv(mgr);
        mp->shadow_info_dirty = false;

        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);
                op->shadow_info_dirty = false;
                op->shadow_extra_info_dirty = false;
        }
}

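/*
 * VSYNC/FRAMEDONE ISR: refreshes the busy/updating state from the HW, clears
 * the shadow-dirty flags once the HW has taken the values into use, writes
 * any remaining dirty configuration, and unregisters itself when idle.
 */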
static void dss_apply_irq_handler(void *data, u32 mask)
{
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;

        spin_lock(&data_lock);

        /* clear busy, updating flags, shadow_dirty flags */
        for (i = 0; i < num_mgrs; i++) {
                struct omap_overlay_manager *mgr;
                struct mgr_priv_data *mp;

                mgr = omap_dss_get_overlay_manager(i);
                mp = get_mgr_priv(mgr);

                if (!mp->enabled)
                        continue;

                mp->updating = dispc_mgr_is_enabled(i);

                if (!mgr_manual_update(mgr)) {
                        mp->busy = dispc_mgr_go_busy(i);

                        if (!mp->busy)
                                mgr_clear_shadow_dirty(mgr);
                } else {
                        if (!mp->updating)
                                mgr_clear_shadow_dirty(mgr);
                }
        }

        dss_write_regs();

        if (!need_isr())
                dss_unregister_vsync_isr();

        spin_unlock(&data_lock);
}

static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op;

        op = get_ovl_priv(ovl);

        if (!op->user_info_dirty)
                return;

        op->user_info_dirty = false;
        op->info_dirty = true;
        op->info = op->user_info;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp;

        mp = get_mgr_priv(mgr);

        if (mgr->device_changed) {
                mgr->device_changed = false;
                mp->user_info_dirty = true;
        }

        if (!mp->user_info_dirty)
                return;

        mp->user_info_dirty = false;
        mp->info_dirty = true;
        mp->info = mp->user_info;
}

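/* Apply the user configuration: copy dirty user_infos to infos and write them
 * to the HW where possible (i.e. no GO pending and not a manual update
 * manager). */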
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
        int r;
        unsigned long flags;
        struct omap_overlay *ovl;

        DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

        r = dispc_runtime_get();
        if (r)
                return r;

        spin_lock_irqsave(&data_lock, flags);

        /* Configure overlays */
        list_for_each_entry(ovl, &mgr->overlays, list)
                omap_dss_mgr_apply_ovl(ovl);

        /* Configure manager */
        omap_dss_mgr_apply_mgr(mgr);

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        dispc_runtime_put();

        return r;
}

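/* Recalculate the FIFO low/high thresholds for the overlay based on the
 * display type, and mark the extra info dirty so they get written out. */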
static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        struct omap_dss_device *dssdev;
        u32 size, burst_size;
        u32 fifo_low, fifo_high;

        dssdev = ovl->manager->device;

        size = dispc_ovl_get_fifo_size(ovl->id);

        burst_size = dispc_ovl_get_burst_size(ovl->id);

        switch (dssdev->type) {
        case OMAP_DISPLAY_TYPE_DPI:
        case OMAP_DISPLAY_TYPE_DBI:
        case OMAP_DISPLAY_TYPE_SDI:
        case OMAP_DISPLAY_TYPE_VENC:
        case OMAP_DISPLAY_TYPE_HDMI:
                default_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &fifo_low, &fifo_high);
                break;
#ifdef CONFIG_OMAP2_DSS_DSI
        case OMAP_DISPLAY_TYPE_DSI:
                dsi_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &fifo_low, &fifo_high);
                break;
#endif
        default:
                BUG();
        }

        op->fifo_low = fifo_low;
        op->fifo_high = fifo_high;
        op->extra_info_dirty = true;
}

static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
{
        struct omap_overlay *ovl;
        struct ovl_priv_data *op;
        struct mgr_priv_data *mp;

        mp = get_mgr_priv(mgr);

        if (!mp->enabled)
                return;

        list_for_each_entry(ovl, &mgr->overlays, list) {
                op = get_ovl_priv(ovl);

                if (!op->enabled)
                        continue;

                dss_ovl_setup_fifo(ovl);
        }
}

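/* Enable the manager: set up the overlay FIFOs, write the pending
 * configuration and, for auto update displays, enable the dispc output. */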
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        mutex_lock(&apply_lock);

        spin_lock_irqsave(&data_lock, flags);

        mp->enabled = true;

        dss_mgr_setup_fifos(mgr);

        dss_write_regs();

        if (!mgr_manual_update(mgr))
                mp->updating = true;

        spin_unlock_irqrestore(&data_lock, flags);

        if (!mgr_manual_update(mgr))
                dispc_mgr_enable(mgr->id, true);

        mutex_unlock(&apply_lock);
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        mutex_lock(&apply_lock);

        if (!mgr_manual_update(mgr))
                dispc_mgr_enable(mgr->id, false);

        spin_lock_irqsave(&data_lock, flags);

        mp->updating = false;
        mp->enabled = false;

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        mp->user_info = *info;
        mp->user_info_dirty = true;

        spin_unlock_irqrestore(&data_lock, flags);

        return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
                struct omap_overlay_manager_info *info)
{
        struct mgr_priv_data *mp = get_mgr_priv(mgr);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        *info = mp->user_info;

        spin_unlock_irqrestore(&data_lock, flags);
}

int dss_mgr_set_device(struct omap_overlay_manager *mgr,
                struct omap_dss_device *dssdev)
{
        int r;

        mutex_lock(&apply_lock);

        if (dssdev->manager) {
                DSSERR("display '%s' already has a manager '%s'\n",
                        dssdev->name, dssdev->manager->name);
                r = -EINVAL;
                goto err;
        }

        if ((mgr->supported_displays & dssdev->type) == 0) {
                DSSERR("display '%s' does not support manager '%s'\n",
                        dssdev->name, mgr->name);
                r = -EINVAL;
                goto err;
        }

        dssdev->manager = mgr;
        mgr->device = dssdev;
        mgr->device_changed = true;

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}

int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
        int r;

        mutex_lock(&apply_lock);

        if (!mgr->device) {
                DSSERR("failed to unset display, display not set.\n");
                r = -EINVAL;
                goto err;
        }

        /*
         * Don't allow currently enabled displays to have the overlay manager
         * pulled out from underneath them
         */
        if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
                r = -EINVAL;
                goto err;
        }

        mgr->device->manager = NULL;
        mgr->device = NULL;
        mgr->device_changed = true;

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}

int dss_ovl_set_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        op->user_info = *info;
        op->user_info_dirty = true;

        spin_unlock_irqrestore(&data_lock, flags);

        return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
                struct omap_overlay_info *info)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;

        spin_lock_irqsave(&data_lock, flags);

        *info = op->user_info;

        spin_unlock_irqrestore(&data_lock, flags);
}

int dss_ovl_set_manager(struct omap_overlay *ovl,
                struct omap_overlay_manager *mgr)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        if (!mgr)
                return -EINVAL;

        mutex_lock(&apply_lock);

        if (ovl->manager) {
                DSSERR("overlay '%s' already has a manager '%s'\n",
                                ovl->name, ovl->manager->name);
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        if (op->enabled) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("overlay has to be disabled to change the manager\n");
                r = -EINVAL;
                goto err;
        }

        op->channel = mgr->id;
        op->extra_info_dirty = true;

        ovl->manager = mgr;
        list_add_tail(&ovl->list, &mgr->overlays);

        spin_unlock_irqrestore(&data_lock, flags);

        /* XXX: When there is an overlay on a DSI manual update display, and
         * the overlay is first disabled, then moved to tv, and enabled, we
         * seem to get SYNC_LOST_DIGIT error.
         *
         * Waiting doesn't seem to help, but updating the manual update display
         * after disabling the overlay seems to fix this. This hints that the
         * overlay is perhaps somehow tied to the LCD output until the output
         * is updated.
         *
         * Userspace workaround for this is to update the LCD after disabling
         * the overlay, but before moving the overlay to TV.
         */

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (!ovl->manager) {
                DSSERR("failed to detach overlay: manager not set\n");
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        if (op->enabled) {
                spin_unlock_irqrestore(&data_lock, flags);
                DSSERR("overlay has to be disabled to unset the manager\n");
                r = -EINVAL;
                goto err;
        }

        op->channel = -1;

        ovl->manager = NULL;
        list_del(&ovl->list);

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}

bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        bool e;

        spin_lock_irqsave(&data_lock, flags);

        e = op->enabled;

        spin_unlock_irqrestore(&data_lock, flags);

        return e;
}

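/* Enable or disable an overlay. The overlay must be connected to a manager
 * with a display attached. The change is written out immediately through the
 * extra info path, so no separate apply is needed. */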
int dss_ovl_enable(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (ovl->manager == NULL || ovl->manager->device == NULL) {
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        op->enabled = true;
        op->extra_info_dirty = true;

        dss_ovl_setup_fifo(ovl);

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}

int dss_ovl_disable(struct omap_overlay *ovl)
{
        struct ovl_priv_data *op = get_ovl_priv(ovl);
        unsigned long flags;
        int r;

        mutex_lock(&apply_lock);

        if (ovl->manager == NULL || ovl->manager->device == NULL) {
                r = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&data_lock, flags);

        op->enabled = false;
        op->extra_info_dirty = true;

        dss_write_regs();

        spin_unlock_irqrestore(&data_lock, flags);

        mutex_unlock(&apply_lock);

        return 0;
err:
        mutex_unlock(&apply_lock);
        return r;
}