apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *        set_info()
 *           v
 * +--------------------+
 * |     user_info      |
 * +--------------------+
 *           v
 *         apply()
 *           v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *           v
 *       write_regs()
 *           v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *           v
 * VFP or lcd/digit_enable
 *           v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
struct ovl_priv_data {
	bool user_info_dirty;
	struct omap_overlay_info user_info;

	bool info_dirty;
	struct omap_overlay_info info;

	bool shadow_info_dirty;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	bool enabled;
	u32 fifo_low, fifo_high;

	/*
	 * True if overlay is to be enabled. Used to check and calculate configs
	 * for the overlay before it is enabled in the HW.
	 */
	bool enabling;
};

struct mgr_priv_data {
	bool user_info_dirty;
	struct omap_overlay_manager_info user_info;

	bool info_dirty;
	struct omap_overlay_manager_info info;

	bool shadow_info_dirty;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	struct omap_video_timings timings;
	struct dss_lcd_mgr_config lcd_config;
};

static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);
static DECLARE_COMPLETION(extra_updated_completion);
static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

static void apply_init_priv(void)
{
	const int num_ovls = dss_feat_get_num_ovls();
	struct mgr_priv_data *mp;
	int i;

	spin_lock_init(&data_lock);

	for (i = 0; i < num_ovls; ++i) {
		struct ovl_priv_data *op;

		op = &dss_data.ovl_priv_data_array[i];

		op->info.global_alpha = 255;

		switch (i) {
		case 0:
			op->info.zorder = 0;
			break;
		case 1:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
			break;
		case 2:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
			break;
		case 3:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
			break;
		}

		op->user_info = op->info;
	}

	/*
	 * Initialize some of the lcd_config fields for TV manager, this lets
	 * us prevent checking if the manager is LCD or TV at some places
	 */
	mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];

	mp->lcd_config.video_port_width = 24;
	mp->lcd_config.clock_info.lck_div = 1;
	mp->lcd_config.clock_info.pck_div = 1;
}
/*
 * A LCD manager's stallmode decides whether it is in manual or auto update. TV
 * manager is always auto update, stallmode field for TV manager is false by
 * default
 */
static bool ovl_manual_update(struct omap_overlay *ovl)
{
	struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);

	return mp->lcd_config.stallmode;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	return mp->lcd_config.stallmode;
}

static int dss_check_settings_low(struct omap_overlay_manager *mgr,
		bool applying)
{
	struct omap_overlay_info *oi;
	struct omap_overlay_manager_info *mi;
	struct omap_overlay *ovl;
	struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->enabled)
		return 0;

	if (applying && mp->user_info_dirty)
		mi = &mp->user_info;
	else
		mi = &mp->info;

	/* collect the infos to be tested into the array */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);

		if (!op->enabled && !op->enabling)
			oi = NULL;
		else if (applying && op->user_info_dirty)
			oi = &op->user_info;
		else
			oi = &op->info;

		ois[ovl->id] = oi;
	}

	return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
}

/*
 * check manager and overlay settings using overlay_info from data->info
 */
static int dss_check_settings(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, false);
}

/*
 * check manager and overlay settings using overlay_info from ovl->info if
 * dirty and from data->info otherwise
 */
static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, true);
}
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_info_dirty)
				return true;

			/*
			 * NOTE: we don't check extra_info flags for disabled
			 * managers, once the manager is enabled, the extra_info
			 * related manager changes will be taken in by HW.
			 */

			/* to write new values to registers */
			if (mp->extra_info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_extra_info_dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				/*
				 * NOTE: we check extra_info flags even for
				 * disabled overlays, as extra_infos need to be
				 * always written.
				 */

				/* to write new values to registers */
				if (op->extra_info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_extra_info_dirty)
					return true;

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_info_dirty)
					return true;
			}
		}
	}

	return false;
}

static bool need_go(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);

	if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
		return true;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
			return true;
	}

	return false;
}
/* returns true if an extra_info field is currently being updated */
static bool extra_info_update_ongoing(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct omap_overlay *ovl;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (!mp->updating)
			continue;

		if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
			return true;

		list_for_each_entry(ovl, &mgr->overlays, list) {
			struct ovl_priv_data *op = get_ovl_priv(ovl);

			if (op->extra_info_dirty || op->shadow_extra_info_dirty)
				return true;
		}
	}

	return false;
}

/* wait until no extra_info updates are pending */
static void wait_pending_extra_info_updates(void)
{
	bool updating;
	unsigned long flags;
	unsigned long t;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	updating = extra_info_update_ongoing();

	if (!updating) {
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	init_completion(&extra_updated_completion);

	spin_unlock_irqrestore(&data_lock, flags);

	t = msecs_to_jiffies(500);
	r = wait_for_completion_timeout(&extra_updated_completion, t);
	if (r == 0)
		DSSWARN("timeout in wait_pending_extra_info_updates\n");
}

static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
{
	return ovl->manager ?
		(ovl->manager->output ? ovl->manager->output->device : NULL) :
		NULL;
}

static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
{
	return mgr->output ? mgr->output->device : NULL;
}
static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct omap_dss_device *dssdev = mgr->get_device(mgr);
	u32 irq;
	int r;

	r = dispc_runtime_get();
	if (r)
		return r;

	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
		irq = DISPC_IRQ_EVSYNC_ODD;
	else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
		irq = DISPC_IRQ_EVSYNC_EVEN;
	else
		irq = dispc_mgr_get_vsync_irq(mgr->id);

	r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);

	dispc_runtime_put();

	return r;
}

static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	spin_lock_irqsave(&data_lock, flags);

	if (mgr_manual_update(mgr)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->info_dirty;
		shadow_dirty = mp->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}
static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	mp = get_mgr_priv(ovl->manager);

	spin_lock_irqsave(&data_lock, flags);

	if (ovl_manual_update(ovl)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = op->info_dirty;
		shadow_dirty = op->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBG("writing ovl %d regs", ovl->id);

	if (!op->enabled || !op->info_dirty)
		return;

	oi = &op->info;

	mp = get_mgr_priv(ovl->manager);

	replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);

	r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	op->info_dirty = false;
	if (mp->updating)
		op->shadow_info_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBG("writing ovl %d regs extra", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */

	dispc_ovl_enable(ovl->id, op->enabled);
	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;
	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}

static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBG("writing mgr %d regs", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->info_dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);

		mp->info_dirty = false;
		if (mp->updating)
			mp->shadow_info_dirty = true;
	}
}

static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	DSSDBG("writing mgr %d regs extra", mgr->id);

	if (!mp->extra_info_dirty)
		return;

	dispc_mgr_set_timings(mgr->id, &mp->timings);

	/* lcd_config parameters */
	if (dss_mgr_is_lcd(mgr->id))
		dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);

	mp->extra_info_dirty = false;
	if (mp->updating)
		mp->shadow_extra_info_dirty = true;
}
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		int r;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		r = dss_check_settings(mgr);
		if (r) {
			DSSERR("cannot write registers for manager %s: "
					"illegal configuration\n", mgr->name);
			continue;
		}

		dss_mgr_write_regs(mgr);
		dss_mgr_write_regs_extra(mgr);
	}
}

static void dss_set_go_bits(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		if (!need_go(mgr))
			continue;

		mp->busy = true;

		if (!dss_data.irq_enabled && need_isr())
			dss_register_vsync_isr();

		dispc_mgr_go(mgr->id);
	}
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);
	mp->shadow_info_dirty = false;
	mp->shadow_extra_info_dirty = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		op->shadow_info_dirty = false;
		op->shadow_extra_info_dirty = false;
	}
}
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("cannot start manual update: illegal configuration\n");
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	dss_mgr_write_regs(mgr);
	dss_mgr_write_regs_extra(mgr);

	mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable_sync(mgr->id);

	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = false;
}

static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	bool extra_updating;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			bool was_busy = mp->busy;
			mp->busy = dispc_mgr_go_busy(i);

			if (was_busy && !mp->busy)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();
	dss_set_go_bits();

	extra_updating = extra_info_update_ongoing();
	if (!extra_updating)
		complete_all(&extra_updated_completion);

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}
static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (!op->user_info_dirty)
		return;

	op->user_info_dirty = false;
	op->info_dirty = true;
	op->info = op->user_info;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->user_info_dirty)
		return;

	mp->user_info_dirty = false;
	mp->info_dirty = true;
	mp->info = mp->user_info;
}

static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	unsigned long flags;
	struct omap_overlay *ovl;
	int r;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	spin_lock_irqsave(&data_lock, flags);

	r = dss_check_settings_apply(mgr);
	if (r) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("failed to apply settings: illegal configuration.\n");
		return r;
	}

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}
static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (op->enabled == enable)
		return;

	op->enabled = enable;
	op->extra_info_dirty = true;
}

static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
		u32 fifo_low, u32 fifo_high)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);

	if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
		return;

	op->fifo_low = fifo_low;
	op->fifo_high = fifo_high;
	op->extra_info_dirty = true;
}

static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	u32 fifo_low, fifo_high;
	bool use_fifo_merge = false;

	if (!op->enabled && !op->enabling)
		return;

	dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
			use_fifo_merge, ovl_manual_update(ovl));

	dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
}

static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->enabled)
		return;

	list_for_each_entry(ovl, &mgr->overlays, list)
		dss_ovl_setup_fifo(ovl);
}

static void dss_setup_fifos(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	struct omap_overlay_manager *mgr;
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		mgr = omap_dss_get_overlay_manager(i);
		dss_mgr_setup_fifos(mgr);
	}
}
int dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (mp->enabled)
		goto out;

	spin_lock_irqsave(&data_lock, flags);

	mp->enabled = true;

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("failed to enable manager %d: check_settings failed\n",
				mgr->id);
		goto err;
	}

	dss_setup_fifos();

	dss_write_regs();
	dss_set_go_bits();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	spin_unlock_irqrestore(&data_lock, flags);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable_sync(mgr->id);

out:
	mutex_unlock(&apply_lock);

	return 0;

err:
	mp->enabled = false;
	spin_unlock_irqrestore(&data_lock, flags);
	mutex_unlock(&apply_lock);
	return r;
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mp->enabled)
		goto out;

	if (!mgr_manual_update(mgr))
		dispc_mgr_disable_sync(mgr->id);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

out:
	mutex_unlock(&apply_lock);
}
static int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	r = dss_mgr_simple_check(mgr, info);
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

static void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
		struct omap_dss_output *output)
{
	int r;

	mutex_lock(&apply_lock);

	if (mgr->output) {
		DSSERR("manager %s is already connected to an output\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_outputs & output->id) == 0) {
		DSSERR("output does not support manager %s\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	output->manager = mgr;
	mgr->output = output;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

static int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
{
	int r;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mgr->output) {
		DSSERR("failed to unset output, output not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (mp->enabled) {
		DSSERR("output can't be unset when manager is enabled\n");
		r = -EINVAL;
		goto err1;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	mgr->output->manager = NULL;
	mgr->output = NULL;

	mutex_unlock(&apply_lock);

	return 0;
err1:
	spin_unlock_irqrestore(&data_lock, flags);
err:
	mutex_unlock(&apply_lock);

	return r;
}
static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
		const struct omap_video_timings *timings)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	mp->timings = *timings;
	mp->extra_info_dirty = true;
}

void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
		const struct omap_video_timings *timings)
{
	unsigned long flags;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	spin_lock_irqsave(&data_lock, flags);

	if (mp->updating) {
		DSSERR("cannot set timings for %s: manager needs to be disabled\n",
			mgr->name);
		goto out;
	}

	dss_apply_mgr_timings(mgr, timings);
out:
	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
		const struct dss_lcd_mgr_config *config)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	mp->lcd_config = *config;
	mp->extra_info_dirty = true;
}

void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
		const struct dss_lcd_mgr_config *config)
{
	unsigned long flags;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	spin_lock_irqsave(&data_lock, flags);

	if (mp->enabled) {
		DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
			mgr->name);
		goto out;
	}

	dss_apply_mgr_lcd_config(mgr, config);
out:
	spin_unlock_irqrestore(&data_lock, flags);
}
static int dss_ovl_set_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	r = dss_ovl_simple_check(ovl, info);
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	op->user_info = *info;
	op->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

static void dss_ovl_get_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = op->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

static int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
				ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	r = dispc_runtime_get();
	if (r)
		goto err;

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err1;
	}

	dispc_ovl_set_channel_out(ovl->id, mgr->id);

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	mutex_unlock(&apply_lock);

	return 0;

err1:
	dispc_runtime_put();
err:
	mutex_unlock(&apply_lock);
	return r;
}
static int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	/* wait for pending extra_info updates to ensure the ovl is disabled */
	wait_pending_extra_info_updates();

	/*
	 * For a manual update display, there is no guarantee that the overlay
	 * is really disabled in HW, we may need an extra update from this
	 * manager before the configurations can go in. Return an error if the
	 * overlay needed an update from the manager.
	 *
	 * TODO: Instead of returning an error, try to do a dummy manager update
	 * here to disable the overlay in hardware. Use the *GATED fields in
	 * the DISPC_CONFIG registers to do a dummy update.
	 */
	spin_lock_irqsave(&data_lock, flags);

	if (ovl_manual_update(ovl) && op->extra_info_dirty) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("need an update to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = NULL;
	list_del(&ovl->list);

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

static bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	bool e;

	spin_lock_irqsave(&data_lock, flags);

	e = op->enabled;

	spin_unlock_irqrestore(&data_lock, flags);

	return e;
}
static int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (op->enabled) {
		r = 0;
		goto err1;
	}

	if (ovl->manager == NULL || ovl->manager->output == NULL) {
		r = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabling = true;

	r = dss_check_settings(ovl->manager);
	if (r) {
		DSSERR("failed to enable overlay %d: check_settings failed\n",
				ovl->id);
		goto err2;
	}

	dss_setup_fifos();

	op->enabling = false;
	dss_apply_ovl_enable(ovl, true);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err2:
	op->enabling = false;
	spin_unlock_irqrestore(&data_lock, flags);
err1:
	mutex_unlock(&apply_lock);
	return r;
}

static int dss_ovl_disable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!op->enabled) {
		r = 0;
		goto err;
	}

	if (ovl->manager == NULL || ovl->manager->output == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	dss_apply_ovl_enable(ovl, false);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;

err:
	mutex_unlock(&apply_lock);
	return r;
}
static int compat_refcnt;
static DEFINE_MUTEX(compat_init_lock);

int omapdss_compat_init(void)
{
	struct platform_device *pdev = dss_get_core_pdev();
	int i;

	mutex_lock(&compat_init_lock);

	if (compat_refcnt++ > 0)
		goto out;

	apply_init_priv();

	dss_init_overlay_managers(pdev);
	dss_init_overlays(pdev);

	for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
		struct omap_overlay_manager *mgr;

		mgr = omap_dss_get_overlay_manager(i);

		mgr->set_output = &dss_mgr_set_output;
		mgr->unset_output = &dss_mgr_unset_output;
		mgr->apply = &omap_dss_mgr_apply;
		mgr->set_manager_info = &dss_mgr_set_info;
		mgr->get_manager_info = &dss_mgr_get_info;
		mgr->wait_for_go = &dss_mgr_wait_for_go;
		mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
		mgr->get_device = &dss_mgr_get_device;
	}

	for (i = 0; i < omap_dss_get_num_overlays(); i++) {
		struct omap_overlay *ovl = omap_dss_get_overlay(i);

		ovl->is_enabled = &dss_ovl_is_enabled;
		ovl->enable = &dss_ovl_enable;
		ovl->disable = &dss_ovl_disable;
		ovl->set_manager = &dss_ovl_set_manager;
		ovl->unset_manager = &dss_ovl_unset_manager;
		ovl->set_overlay_info = &dss_ovl_set_info;
		ovl->get_overlay_info = &dss_ovl_get_info;
		ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
		ovl->get_device = &dss_ovl_get_device;
	}

out:
	mutex_unlock(&compat_init_lock);

	return 0;
}
EXPORT_SYMBOL(omapdss_compat_init);

void omapdss_compat_uninit(void)
{
	struct platform_device *pdev = dss_get_core_pdev();

	mutex_lock(&compat_init_lock);

	if (--compat_refcnt > 0)
		goto out;

	dss_uninit_overlay_managers(pdev);
	dss_uninit_overlays(pdev);
out:
	mutex_unlock(&compat_init_lock);
}
EXPORT_SYMBOL(omapdss_compat_uninit);