apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *       +--------------------+
 *       |overlay/manager_info|
 *       +--------------------+
 *                 v
 *              apply()
 *                 v
 *       +--------------------+
 *       |        info        |
 *       +--------------------+
 *                 v
 *            write_regs()
 *                 v
 *       +--------------------+
 *       |  shadow registers  |
 *       +--------------------+
 *                 v
 *      VFP or lcd/digit_enable
 *                 v
 *       +--------------------+
 *       |     registers      |
 *       +--------------------+
 */

struct ovl_priv_data {
	bool user_info_dirty;
	struct omap_overlay_info user_info;

	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;

	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_info info;

	enum omap_channel channel;

	u32 fifo_low;
	u32 fifo_high;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	bool enabled;
};

struct mgr_priv_data {
	bool user_info_dirty;
	struct omap_overlay_manager_info user_info;

	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;

	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_manager_info info;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;
};

static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);

static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

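/*
 * Initialize the default SW state for each overlay: fully opaque global
 * alpha, and distinct default z-orders when the hardware supports free
 * z-order (FEAT_ALPHA_FREE_ZORDER).
 */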
void dss_apply_init(void)
{
	const int num_ovls = dss_feat_get_num_ovls();
	int i;

	spin_lock_init(&data_lock);

	for (i = 0; i < num_ovls; ++i) {
		struct ovl_priv_data *op;

		op = &dss_data.ovl_priv_data_array[i];

		op->info.global_alpha = 255;

		switch (i) {
		case 0:
			op->info.zorder = 0;
			break;
		case 1:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
			break;
		case 2:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
			break;
		case 3:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
			break;
		}

		op->user_info = op->info;
	}
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

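/*
 * Check whether the vsync/framedone ISR still has work to do: an in-progress
 * manual update, a pending GO bit, or dirty info that has not yet been
 * written to the (shadow) registers.
 */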
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->dirty || op->extra_info_dirty)
					return true;
			}
		}
	}

	return false;
}

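/*
 * A GO is needed if the manager or any of its overlays has values in the
 * shadow registers that have not yet been taken into use.
 */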
static bool need_go(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);

	if (mp->shadow_dirty)
		return true;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		if (op->shadow_dirty || op->shadow_extra_info_dirty)
			return true;
	}

	return false;
}

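/*
 * Wait until the manager's dirty and shadow_dirty flags have been cleared,
 * i.e. the applied configuration has reached the hardware. Gives up (with
 * return value 0) after the bounded number of vsyncs described in the loop
 * comment below.
 */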
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (mgr_manual_update(mgr))
		return 0;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	mp = get_mgr_priv(mgr);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->dirty;
		shadow_dirty = mp->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (ovl_manual_update(ovl))
		return 0;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = op->dirty;
		shadow_dirty = op->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}

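/*
 * Write the overlay's cached info to the dispc (shadow) registers. On
 * failure the overlay is disabled, as not much else can be done from vsync
 * context.
 */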
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool ilace, replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBGF("%d", ovl->id);

	if (!op->enabled || !op->dirty)
		return;

	oi = &op->info;

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(ovl->id, op->channel);

	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->dirty = false;

	if (mp->updating)
		op->shadow_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBGF("%d", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */

	dispc_ovl_enable(ovl->id, op->enabled);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;

	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}

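/*
 * Commit the cached settings of the manager and all of its overlays to the
 * dispc registers. Must not be called while the GO bit is set (mp->busy).
 */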
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBGF("%d", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);

		mp->dirty = false;

		if (mp->updating)
			mp->shadow_dirty = true;
	}
}

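/*
 * Write the registers for all enabled auto-update managers that are not
 * still waiting for a previous GO, and set the GO bit where new shadow
 * values were written. The vsync ISR is registered if it is needed to track
 * the GO bit.
 */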
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		dss_mgr_write_regs(mgr);

		if (need_go(mgr)) {
			mp->busy = true;

			if (!dss_data.irq_enabled && need_isr())
				dss_register_vsync_isr();

			dispc_mgr_go(mgr->id);
		}
	}
}

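/*
 * Start a frame on a manual update manager: write the pending settings and
 * enable the dispc output for one update.
 */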
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	dss_mgr_write_regs(mgr);

	mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable(mgr->id, true);

	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

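/*
 * Register/unregister dss_apply_irq_handler for the VSYNC and FRAMEDONE
 * interrupts of all managers.
 */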
static void dss_register_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = false;
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);
	mp->shadow_dirty = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		op->shadow_dirty = false;
		op->shadow_extra_info_dirty = false;
	}
}

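/*
 * VSYNC/FRAMEDONE handler: refresh the busy/updating state of each enabled
 * manager, clear the shadow_dirty flags once the shadow registers have been
 * taken into use, flush any remaining dirty settings, and unregister the ISR
 * when there is nothing left to track.
 */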
static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			mp->busy = dispc_mgr_go_busy(i);

			if (!mp->busy)
				mgr_clear_shadow_dirty(mgr);
		} else {
			if (!mp->updating)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}

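/*
 * Move user-set overlay/manager info into the internal info caches and mark
 * them dirty, so that dss_write_regs() will push them to the hardware.
 */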
static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (ovl->manager_changed) {
		ovl->manager_changed = false;
		op->user_info_dirty = true;
	}

	if (!op->user_info_dirty)
		return;

	op->user_info_dirty = false;
	op->dirty = true;
	op->info = op->user_info;

	op->channel = ovl->manager->id;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (mgr->device_changed) {
		mgr->device_changed = false;
		mp->user_info_dirty = true;
	}

	if (!mp->user_info_dirty)
		return;

	mp->user_info_dirty = false;
	mp->dirty = true;
	mp->info = mp->user_info;
}

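/*
 * Recalculate the overlay's FIFO low/high thresholds from the FIFO and burst
 * sizes, using the DSI-specific helper for DSI displays.
 */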
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	op = get_ovl_priv(ovl);
	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#endif
	default:
		BUG();
	}
}

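/*
 * Apply the user-set configuration of a manager and its overlays: copy the
 * user info into the internal caches, recompute FIFO thresholds and write
 * the result to the registers (setting GO where needed).
 */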
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl_fifos(ovl);

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	return r;
}

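/*
 * Enable/disable a manager. For auto-update displays this also enables or
 * disables the dispc output; manual update displays are driven through
 * dss_mgr_start_update() instead.
 */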
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	spin_lock_irqsave(&data_lock, flags);

	mp->enabled = true;

	dss_write_regs();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	spin_unlock_irqrestore(&data_lock, flags);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, true);

	mutex_unlock(&apply_lock);
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, false);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_mgr_set_device(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev)
{
	int r;

	mutex_lock(&apply_lock);

	if (dssdev->manager) {
		DSSERR("display '%s' already has a manager '%s'\n",
			dssdev->name, dssdev->manager->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_displays & dssdev->type) == 0) {
		DSSERR("display '%s' does not support manager '%s'\n",
			dssdev->name, mgr->name);
		r = -EINVAL;
		goto err;
	}

	dssdev->manager = mgr;
	mgr->device = dssdev;
	mgr->device_changed = true;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
	int r;

	mutex_lock(&apply_lock);

	if (!mgr->device) {
		DSSERR("failed to unset display, display not set.\n");
		r = -EINVAL;
		goto err;
	}

	/*
	 * Don't allow currently enabled displays to have the overlay manager
	 * pulled out from underneath them
	 */
	if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
		r = -EINVAL;
		goto err;
	}

	mgr->device->manager = NULL;
	mgr->device = NULL;
	mgr->device_changed = true;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_set_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	op->user_info = *info;
	op->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = op->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

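/*
 * Attach/detach an overlay to/from a manager. The overlay must be disabled
 * while its manager is being changed.
 */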
int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
				ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);
	ovl->manager_changed = true;

	spin_unlock_irqrestore(&data_lock, flags);

	/* XXX: When there is an overlay on a DSI manual update display, and
	 * the overlay is first disabled, then moved to tv, and enabled, we
	 * seem to get SYNC_LOST_DIGIT error.
	 *
	 * Waiting doesn't seem to help, but updating the manual update display
	 * after disabling the overlay seems to fix this. This hints that the
	 * overlay is perhaps somehow tied to the LCD output until the output
	 * is updated.
	 *
	 * Userspace workaround for this is to update the LCD after disabling
	 * the overlay, but before moving the overlay to TV.
	 */

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = NULL;
	list_del(&ovl->list);
	ovl->manager_changed = true;

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	bool e;

	spin_lock_irqsave(&data_lock, flags);

	e = op->enabled;

	spin_unlock_irqrestore(&data_lock, flags);

	return e;
}

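/*
 * Enable/disable an overlay. The change is recorded as dirty 'extra info'
 * and flushed to the hardware through dss_write_regs().
 */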
int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = true;
	op->extra_info_dirty = true;

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_disable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = false;
	op->extra_info_dirty = true;

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}