apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *        set_info()
 *           v
 * +--------------------+
 * |     user_info      |
 * +--------------------+
 *           v
 *         apply()
 *           v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *           v
 *       write_regs()
 *           v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *           v
 * VFP or lcd/digit_enable
 *           v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */

struct ovl_priv_data {

	bool user_info_dirty;
	struct omap_overlay_info user_info;

	bool info_dirty;
	struct omap_overlay_info info;

	bool shadow_info_dirty;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	bool enabled;

	u32 fifo_low, fifo_high;

	/*
	 * True if overlay is to be enabled. Used to check and calculate configs
	 * for the overlay before it is enabled in the HW.
	 */
	bool enabling;
};

struct mgr_priv_data {

	bool user_info_dirty;
	struct omap_overlay_manager_info user_info;

	bool info_dirty;
	struct omap_overlay_manager_info info;

	bool shadow_info_dirty;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;

	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	struct omap_video_timings timings;
	struct dss_lcd_mgr_config lcd_config;
};

static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);
static DECLARE_COMPLETION(extra_updated_completion);

static void dss_register_vsync_isr(void);

static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

void dss_apply_init(void)
{
	const int num_ovls = dss_feat_get_num_ovls();
	struct mgr_priv_data *mp;
	int i;

	spin_lock_init(&data_lock);

	for (i = 0; i < num_ovls; ++i) {
		struct ovl_priv_data *op;

		op = &dss_data.ovl_priv_data_array[i];

		op->info.global_alpha = 255;

		switch (i) {
		case 0:
			op->info.zorder = 0;
			break;
		case 1:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
			break;
		case 2:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
			break;
		case 3:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
			break;
		}

		op->user_info = op->info;
	}

	/*
	 * Initialize some of the lcd_config fields for the TV manager; this
	 * saves us from checking whether the manager is LCD or TV in several
	 * places.
	 */
	mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];

	mp->lcd_config.video_port_width = 24;
	mp->lcd_config.clock_info.lck_div = 1;
	mp->lcd_config.clock_info.pck_div = 1;
}

/*
 * An LCD manager's stallmode decides whether it is in manual or auto update
 * mode. The TV manager is always in auto update mode; its stallmode field is
 * false by default.
 */
static bool ovl_manual_update(struct omap_overlay *ovl)
{
	struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);

	return mp->lcd_config.stallmode;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	return mp->lcd_config.stallmode;
}

static int dss_check_settings_low(struct omap_overlay_manager *mgr,
		bool applying)
{
	struct omap_overlay_info *oi;
	struct omap_overlay_manager_info *mi;
	struct omap_overlay *ovl;
	struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->enabled)
		return 0;

	if (applying && mp->user_info_dirty)
		mi = &mp->user_info;
	else
		mi = &mp->info;

	/* collect the infos to be tested into the array */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);

		if (!op->enabled && !op->enabling)
			oi = NULL;
		else if (applying && op->user_info_dirty)
			oi = &op->user_info;
		else
			oi = &op->info;

		ois[ovl->id] = oi;
	}

	return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
}

/*
 * check manager and overlay settings using the current (applied) info
 */
static int dss_check_settings(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, false);
}

/*
 * check manager and overlay settings using user_info if dirty and the current
 * info otherwise
 */
static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, true);
}

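/*
 * Decide whether the vsync/framedone ISR still needs to be registered: true
 * while any enabled manager or overlay has dirty or shadow-dirty state to
 * flush, a GO bit in flight, or a manual update in progress.
 */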
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_info_dirty)
				return true;

			/*
			 * NOTE: we don't check extra_info flags for disabled
			 * managers; once the manager is enabled, the
			 * extra_info related manager changes will be taken in
			 * by the HW.
			 */

			/* to write new values to registers */
			if (mp->extra_info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_extra_info_dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				/*
				 * NOTE: we check extra_info flags even for
				 * disabled overlays, as extra_infos need to
				 * always be written.
				 */

				/* to write new values to registers */
				if (op->extra_info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_extra_info_dirty)
					return true;

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_info_dirty)
					return true;
			}
		}
	}

	return false;
}

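/*
 * Returns true if the manager or one of its overlays has shadow-dirty state,
 * i.e. shadow register content that still needs a GO to be taken into use.
 */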
static bool need_go(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);

	if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
		return true;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
			return true;
	}

	return false;
}

/* returns true if an extra_info field is currently being updated */
static bool extra_info_update_ongoing(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct omap_overlay *ovl;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (!mp->updating)
			continue;

		if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
			return true;

		list_for_each_entry(ovl, &mgr->overlays, list) {
			struct ovl_priv_data *op = get_ovl_priv(ovl);

			if (op->extra_info_dirty || op->shadow_extra_info_dirty)
				return true;
		}
	}

	return false;
}

/* wait until no extra_info updates are pending */
static void wait_pending_extra_info_updates(void)
{
	bool updating;
	unsigned long flags;
	unsigned long t;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	updating = extra_info_update_ongoing();

	if (!updating) {
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	init_completion(&extra_updated_completion);

	spin_unlock_irqrestore(&data_lock, flags);

	t = msecs_to_jiffies(500);
	r = wait_for_completion_timeout(&extra_updated_completion, t);
	if (r == 0)
		DSSWARN("timeout in wait_pending_extra_info_updates\n");
}

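/*
 * Wait until the manager's dirty and shadow-dirty info has been taken into
 * use, i.e. until the pending GO has completed. Nothing to wait for on
 * manual update or disabled managers.
 */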
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	spin_lock_irqsave(&data_lock, flags);

	if (mgr_manual_update(mgr)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->info_dirty;
		shadow_dirty = mp->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}

int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	mp = get_mgr_priv(ovl->manager);

	spin_lock_irqsave(&data_lock, flags);

	if (ovl_manual_update(ovl)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&data_lock, flags);
		dirty = op->info_dirty;
		shadow_dirty = op->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}

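/*
 * Write the overlay's dirty info to the dispc registers. This may run from
 * the vsync ISR, so a dispc_ovl_setup() failure can only be handled by
 * disabling the overlay.
 */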
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBG("writing ovl %d regs", ovl->id);

	if (!op->enabled || !op->info_dirty)
		return;

	oi = &op->info;

	mp = get_mgr_priv(ovl->manager);

	replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);

	r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	op->info_dirty = false;
	if (mp->updating)
		op->shadow_info_dirty = true;
}

static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBG("writing ovl %d regs extra", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */
	dispc_ovl_enable(ovl->id, op->enabled);
	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;
	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}

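/*
 * Write the manager's and its overlays' dirty info to the dispc registers.
 * Skipped entirely while the manager is disabled; must not run while the GO
 * bit is still set (mp->busy).
 */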
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBG("writing mgr %d regs", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->info_dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);

		mp->info_dirty = false;
		if (mp->updating)
			mp->shadow_info_dirty = true;
	}
}

static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	DSSDBG("writing mgr %d regs extra", mgr->id);

	if (!mp->extra_info_dirty)
		return;

	dispc_mgr_set_timings(mgr->id, &mp->timings);

	/* lcd_config parameters */
	if (dss_mgr_is_lcd(mgr->id))
		dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);

	mp->extra_info_dirty = false;
	if (mp->updating)
		mp->shadow_extra_info_dirty = true;
}

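/*
 * Write dirty info to the registers of every enabled, auto-update manager
 * that is not busy with a pending GO. Each manager's configuration is
 * validated first and skipped if it is illegal.
 */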
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		int r;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		r = dss_check_settings(mgr);
		if (r) {
			DSSERR("cannot write registers for manager %s: "
					"illegal configuration\n", mgr->name);
			continue;
		}

		dss_mgr_write_regs(mgr);
		dss_mgr_write_regs_extra(mgr);
	}
}

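/*
 * Set the GO bit for every enabled, auto-update manager that has newly
 * written shadow register content, registering the vsync ISR if needed so
 * that GO completion can be observed.
 */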
static void dss_set_go_bits(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		if (!need_go(mgr))
			continue;

		mp->busy = true;

		if (!dss_data.irq_enabled && need_isr())
			dss_register_vsync_isr();

		dispc_mgr_go(mgr->id);
	}
}

static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;
	struct ovl_priv_data *op;

	mp = get_mgr_priv(mgr);
	mp->shadow_info_dirty = false;
	mp->shadow_extra_info_dirty = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);
		op->shadow_info_dirty = false;
		op->shadow_extra_info_dirty = false;
	}
}

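/*
 * Start a single frame update on a manual update manager: validate the
 * configuration, write the dirty info to the registers and enable the output
 * for one frame.
 */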
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("cannot start manual update: illegal configuration\n");
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	dss_mgr_write_regs(mgr);
	dss_mgr_write_regs_extra(mgr);

	mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable_sync(mgr->id);

	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_irq_handler(void *data, u32 mask);

static void dss_register_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = true;
}

static void dss_unregister_vsync_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	u32 mask;
	int r, i;

	mask = 0;
	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_vsync_irq(i);

	for (i = 0; i < num_mgrs; ++i)
		mask |= dispc_mgr_get_framedone_irq(i);

	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
	WARN_ON(r);

	dss_data.irq_enabled = false;
}

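/*
 * VSYNC/FRAMEDONE handler: refresh the busy/updating state of each enabled
 * manager, clear shadow-dirty flags once the GO bit has gone down, flush any
 * remaining dirty info, and unregister itself when there is nothing left to
 * do.
 */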
static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	bool extra_updating;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			bool was_busy = mp->busy;
			mp->busy = dispc_mgr_go_busy(i);

			if (was_busy && !mp->busy)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();
	dss_set_go_bits();

	extra_updating = extra_info_update_ongoing();
	if (!extra_updating)
		complete_all(&extra_updated_completion);

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}

static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (!op->user_info_dirty)
		return;

	op->user_info_dirty = false;
	op->info_dirty = true;
	op->info = op->user_info;
}

static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->user_info_dirty)
		return;

	mp->user_info_dirty = false;
	mp->info_dirty = true;
	mp->info = mp->user_info;
}

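/*
 * Apply the user_info of the manager and all its overlays: validate the new
 * configuration, copy it into the active info, write it to the registers and
 * request a GO. This is the apply() step of the caching scheme described at
 * the top of this file.
 */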
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	unsigned long flags;
	struct omap_overlay *ovl;
	int r;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	spin_lock_irqsave(&data_lock, flags);

	r = dss_check_settings_apply(mgr);
	if (r) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("failed to apply settings: illegal configuration.\n");
		return r;
	}

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
{
	struct ovl_priv_data *op;

	op = get_ovl_priv(ovl);

	if (op->enabled == enable)
		return;

	op->enabled = enable;
	op->extra_info_dirty = true;
}

static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
		u32 fifo_low, u32 fifo_high)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);

	if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
		return;

	op->fifo_low = fifo_low;
	op->fifo_high = fifo_high;
	op->extra_info_dirty = true;
}

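/*
 * Recompute the FIFO low/high thresholds for an overlay that is enabled or
 * about to be enabled, and mark them for writing via the extra_info path.
 */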
static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	u32 fifo_low, fifo_high;
	bool use_fifo_merge = false;

	if (!op->enabled && !op->enabling)
		return;

	dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
			use_fifo_merge, ovl_manual_update(ovl));

	dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
}

static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
{
	struct omap_overlay *ovl;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (!mp->enabled)
		return;

	list_for_each_entry(ovl, &mgr->overlays, list)
		dss_ovl_setup_fifo(ovl);
}

static void dss_setup_fifos(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	struct omap_overlay_manager *mgr;
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		mgr = omap_dss_get_overlay_manager(i);
		dss_mgr_setup_fifos(mgr);
	}
}

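/*
 * Enable a manager: validate the configuration, set up FIFO thresholds,
 * write the registers and, for auto-update managers, enable the output
 * synchronously.
 */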
int dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (mp->enabled)
		goto out;

	spin_lock_irqsave(&data_lock, flags);

	mp->enabled = true;

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("failed to enable manager %d: check_settings failed\n",
				mgr->id);
		goto err;
	}

	dss_setup_fifos();

	dss_write_regs();
	dss_set_go_bits();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	spin_unlock_irqrestore(&data_lock, flags);

	if (!mgr_manual_update(mgr))
		dispc_mgr_enable_sync(mgr->id);

out:
	mutex_unlock(&apply_lock);

	return 0;

err:
	mp->enabled = false;
	spin_unlock_irqrestore(&data_lock, flags);
	mutex_unlock(&apply_lock);
	return r;
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mp->enabled)
		goto out;

	if (!mgr_manual_update(mgr))
		dispc_mgr_disable_sync(mgr->id);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

out:
	mutex_unlock(&apply_lock);
}

int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	r = dss_mgr_simple_check(mgr, info);
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_mgr_set_output(struct omap_overlay_manager *mgr,
		struct omap_dss_output *output)
{
	int r;

	mutex_lock(&apply_lock);

	if (mgr->output) {
		DSSERR("manager %s is already connected to an output\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_outputs & output->id) == 0) {
		DSSERR("output does not support manager %s\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	output->manager = mgr;
	mgr->output = output;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
{
	int r;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mgr->output) {
		DSSERR("failed to unset output, output not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (mp->enabled) {
		DSSERR("output can't be unset when manager is enabled\n");
		r = -EINVAL;
		goto err1;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	mgr->output->manager = NULL;
	mgr->output = NULL;

	mutex_unlock(&apply_lock);

	return 0;
err1:
	spin_unlock_irqrestore(&data_lock, flags);
err:
	mutex_unlock(&apply_lock);

	return r;
}

static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
		const struct omap_video_timings *timings)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	mp->timings = *timings;
	mp->extra_info_dirty = true;
}

void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
		const struct omap_video_timings *timings)
{
	unsigned long flags;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	spin_lock_irqsave(&data_lock, flags);

	if (mp->updating) {
		DSSERR("cannot set timings for %s: manager needs to be disabled\n",
			mgr->name);
		goto out;
	}

	dss_apply_mgr_timings(mgr, timings);
out:
	spin_unlock_irqrestore(&data_lock, flags);
}

static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
		const struct dss_lcd_mgr_config *config)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	mp->lcd_config = *config;
	mp->extra_info_dirty = true;
}

void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
		const struct dss_lcd_mgr_config *config)
{
	unsigned long flags;
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	spin_lock_irqsave(&data_lock, flags);

	if (mp->enabled) {
		DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
			mgr->name);
		goto out;
	}

	dss_apply_mgr_lcd_config(mgr, config);
out:
	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_ovl_set_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	r = dss_ovl_simple_check(ovl, info);
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	op->user_info = *info;
	op->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}

void dss_ovl_get_info(struct omap_overlay *ovl,
		struct omap_overlay_info *info)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = op->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}

int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
				ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	r = dispc_runtime_get();
	if (r)
		goto err;

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err1;
	}

	dispc_ovl_set_channel_out(ovl->id, mgr->id);

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	mutex_unlock(&apply_lock);

	return 0;

err1:
	dispc_runtime_put();
err:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	/* wait for pending extra_info updates to ensure the ovl is disabled */
	wait_pending_extra_info_updates();

	/*
	 * For a manual update display, there is no guarantee that the overlay
	 * is really disabled in HW; we may need an extra update from this
	 * manager before the configurations can go in. Return an error if the
	 * overlay needed an update from the manager.
	 *
	 * TODO: Instead of returning an error, try to do a dummy manager update
	 * here to disable the overlay in hardware. Use the *GATED fields in
	 * the DISPC_CONFIG registers to do a dummy update.
	 */
	spin_lock_irqsave(&data_lock, flags);

	if (ovl_manual_update(ovl) && op->extra_info_dirty) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("need an update to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = NULL;
	list_del(&ovl->list);

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}

bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	bool e;

	spin_lock_irqsave(&data_lock, flags);

	e = op->enabled;

	spin_unlock_irqrestore(&data_lock, flags);

	return e;
}

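/*
 * Enable an overlay: the configuration is validated with op->enabling set, so
 * that the FIFO thresholds are computed before the overlay is actually
 * enabled in the HW via the extra_info path.
 */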
int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (op->enabled) {
		r = 0;
		goto err1;
	}

	if (ovl->manager == NULL || ovl->manager->output == NULL) {
		r = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&data_lock, flags);

	op->enabling = true;

	r = dss_check_settings(ovl->manager);
	if (r) {
		DSSERR("failed to enable overlay %d: check_settings failed\n",
				ovl->id);
		goto err2;
	}

	dss_setup_fifos();

	op->enabling = false;
	dss_apply_ovl_enable(ovl, true);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err2:
	op->enabling = false;
	spin_unlock_irqrestore(&data_lock, flags);
err1:
	mutex_unlock(&apply_lock);
	return r;
}

int dss_ovl_disable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!op->enabled) {
		r = 0;
		goto err;
	}

	if (ovl->manager == NULL || ovl->manager->output == NULL) {
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	dss_apply_ovl_enable(ovl, false);
	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;

err:
	mutex_unlock(&apply_lock);
	return r;
}