apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *       set_info()
 *          v
 * +--------------------+
 * |     user_info      |
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *          v
 *      write_regs()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
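
/*
 * A minimal usage sketch of the pipeline above (the calling context is an
 * assumption; the functions are the ones defined in this file). A consumer
 * first stores new settings, then applies them and waits for the hardware
 * to take them into use:
 *
 *	struct omap_overlay_info info;
 *
 *	dss_ovl_get_info(ovl, &info);
 *	info.paddr = new_paddr;			// hypothetical change
 *	dss_ovl_set_info(ovl, &info);		// -> user_info
 *	omap_dss_mgr_apply(ovl->manager);	// user_info -> info, regs, GO
 *	dss_mgr_wait_for_go(ovl->manager);	// new values taken into use
 */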
struct ovl_priv_data {
	bool user_info_dirty;
	struct omap_overlay_info user_info;

	bool info_dirty;
	struct omap_overlay_info info;

	bool shadow_info_dirty;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	bool enabled;
	enum omap_channel channel;
	u32 fifo_low, fifo_high;
};

struct mgr_priv_data {
	bool user_info_dirty;
	struct omap_overlay_manager_info user_info;

	bool info_dirty;
	struct omap_overlay_manager_info info;

	bool shadow_info_dirty;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;
};

static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);

static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

void dss_apply_init(void)
{
	const int num_ovls = dss_feat_get_num_ovls();
	int i;

	spin_lock_init(&data_lock);

	for (i = 0; i < num_ovls; ++i) {
		struct ovl_priv_data *op;

		op = &dss_data.ovl_priv_data_array[i];

		op->info.global_alpha = 255;

		switch (i) {
		case 0:
			op->info.zorder = 0;
			break;
		case 1:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
			break;
		case 2:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
			break;
		case 3:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
			break;
		}

		op->user_info = op->info;
	}
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
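
/*
 * Returns true if the vsync/framedone ISR still has work to do for any
 * manager: a pending GO bit, dirty info waiting to be written to the
 * registers, or an ongoing manual update whose FRAMEDONE must be caught.
 */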
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->info_dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->info_dirty || op->extra_info_dirty)
					return true;
			}
		}
	}

	return false;
}
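
/*
 * Returns true if the manager or any of its overlays has values sitting in
 * the shadow registers, i.e. a GO must be raised so the hardware takes the
 * new settings into use on the next vsync.
 */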
static bool need_go(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);

	if (mp->shadow_info_dirty)
		return true;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
			return true;
	}

	return false;
}

int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (mgr_manual_update(mgr))
		return 0;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	mp = get_mgr_priv(mgr);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->info_dirty;
		shadow_dirty = mp->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (ovl_manual_update(ovl))
		return 0;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = op->info_dirty;
		shadow_dirty = op->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}
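
/*
 * Write the overlay's dirty info to the DISPC registers. Called with
 * data_lock held, possibly from the vsync ISR, so on error we can only
 * report the failure and disable the overlay.
 */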
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool ilace, replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBGF("%d", ovl->id);

	if (!op->enabled || !op->info_dirty)
		return;

	oi = &op->info;

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	mp = get_mgr_priv(ovl->manager);

	op->info_dirty = false;
	if (mp->updating)
		op->shadow_info_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBGF("%d", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */

	dispc_ovl_enable(ovl->id, op->enabled);
	dispc_ovl_set_channel_out(ovl->id, op->channel);
	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;
	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}

static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBGF("%d", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->info_dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);
		mp->info_dirty = false;
		if (mp->updating)
			mp->shadow_info_dirty = true;
	}
}
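
/*
 * Push dirty settings to the hardware for every auto-update manager whose
 * shadow registers are writable (GO not set), and raise GO where needed.
 * Called with data_lock held.
 */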
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		dss_mgr_write_regs(mgr);

		if (need_go(mgr)) {
			mp->busy = true;

			if (!dss_data.irq_enabled && need_isr())
				dss_register_vsync_isr();

			dispc_mgr_go(mgr->id);
		}
	}
}

void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	dss_mgr_write_regs(mgr);

	mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable(mgr->id, true);

	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = false;
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);
	mp->shadow_info_dirty = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		op->shadow_info_dirty = false;
		op->shadow_extra_info_dirty = false;
	}
}
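
/*
 * VSYNC/FRAMEDONE ISR: refresh each manager's busy/updating state, clear
 * the shadow-dirty flags once the hardware has taken the values into use,
 * push any remaining dirty settings, and unregister itself when no more
 * work is pending.
 */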
static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			mp->busy = dispc_mgr_go_busy(i);

			if (!mp->busy)
				mgr_clear_shadow_dirty(mgr);
		} else {
			if (!mp->updating)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}

static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (!op->user_info_dirty)
		return;

	op->user_info_dirty = false;
	op->info_dirty = true;
	op->info = op->user_info;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->user_info_dirty)
		return;

	mp->user_info_dirty = false;
	mp->info_dirty = true;
	mp->info = mp->user_info;
}

int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	return r;
}
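
/*
 * Compute and store the FIFO low/high thresholds for one overlay, using the
 * display-type specific helper, and mark them to be written out together
 * with the other "extra" settings.
 */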
static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_dss_device *dssdev;
	u32 size, burst_size;
	u32 fifo_low, fifo_high;

	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &fifo_low, &fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &fifo_low, &fifo_high);
		break;
#endif
	default:
		BUG();
	}

	op->fifo_low = fifo_low;
	op->fifo_high = fifo_high;
	op->extra_info_dirty = true;
}

static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->enabled)
		return;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);

		if (!op->enabled)
			continue;

		dss_ovl_setup_fifo(ovl);
	}
}
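
/*
 * Mark the manager as enabled, program FIFO thresholds and shadow
 * registers, and for auto-update displays enable the DISPC output here.
 * Manual update displays start their updates via dss_mgr_start_update().
 */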
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	spin_lock_irqsave(&data_lock, flags);

	mp->enabled = true;

	dss_mgr_setup_fifos(mgr);

	dss_write_regs();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	spin_unlock_irqrestore(&data_lock, flags);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, true);

	mutex_unlock(&apply_lock);
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable(mgr->id, false);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_mgr_set_device(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev)
{
	int r;

	mutex_lock(&apply_lock);

	if (dssdev->manager) {
		DSSERR("display '%s' already has a manager '%s'\n",
			dssdev->name, dssdev->manager->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_displays & dssdev->type) == 0) {
		DSSERR("display '%s' does not support manager '%s'\n",
			dssdev->name, mgr->name);
		r = -EINVAL;
		goto err;
	}

	dssdev->manager = mgr;
	mgr->device = dssdev;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
{
	int r;

	mutex_lock(&apply_lock);

	if (!mgr->device) {
		DSSERR("failed to unset display, display not set.\n");
		r = -EINVAL;
		goto err;
	}

	/*
	 * Don't allow currently enabled displays to have the overlay manager
	 * pulled out from underneath them
	 */
	if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
		r = -EINVAL;
		goto err;
	}

	mgr->device->manager = NULL;
	mgr->device = NULL;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_set_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	op->user_info = *info;
	op->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = op->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
				ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	op->channel = mgr->id;
	op->extra_info_dirty = true;

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);

	spin_unlock_irqrestore(&data_lock, flags);

	/* XXX: When there is an overlay on a DSI manual update display, and
	 * the overlay is first disabled, then moved to tv, and enabled, we
	 * seem to get SYNC_LOST_DIGIT error.
	 *
	 * Waiting doesn't seem to help, but updating the manual update display
	 * after disabling the overlay seems to fix this. This hints that the
	 * overlay is perhaps somehow tied to the LCD output until the output
	 * is updated.
	 *
	 * Userspace workaround for this is to update the LCD after disabling
	 * the overlay, but before moving the overlay to TV.
	 */

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	op->channel = -1;

	ovl->manager = NULL;
	list_del(&ovl->list);

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	bool e;

	spin_lock_irqsave(&data_lock, flags);

	e = op->enabled;

	spin_unlock_irqrestore(&data_lock, flags);

	return e;
}

int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = true;
	op->extra_info_dirty = true;

	dss_ovl_setup_fifo(ovl);

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_disable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (ovl->manager == NULL || ovl->manager->device == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabled = false;
	op->extra_info_dirty = true;

	dss_write_regs();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}