/* apply.c */
/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  17. #define DSS_SUBSYS_NAME "APPLY"
  18. #include <linux/kernel.h>
  19. #include <linux/slab.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/jiffies.h>
  22. #include <video/omapdss.h>
  23. #include "dss.h"
  24. #include "dss_features.h"
/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 *        set_info()
 *            v
 * +--------------------+
 * |      user_info     |
 * +--------------------+
 *            v
 *          apply()
 *            v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *            v
 *       write_regs()
 *            v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *            v
 * VFP or lcd/digit_enable
 *            v
 * +--------------------+
 * |      registers     |
 * +--------------------+
 */
/* Per-overlay private state for the 4-level settings cache (see top). */
struct ovl_priv_data {
	/* true if user_info has changes not yet moved to info by apply() */
	bool user_info_dirty;
	/* latest settings requested via set_info() */
	struct omap_overlay_info user_info;

	/* true if info has changes not yet written to the HW registers */
	bool info_dirty;
	/* settings to be written to the (shadow) registers */
	struct omap_overlay_info info;

	/* true if shadow registers hold changes the HW has not taken in yet */
	bool shadow_info_dirty;

	/* dirty flags for settings kept outside of info (enable, fifos) */
	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	/* true if the overlay is enabled in HW */
	bool enabled;

	/* fifo thresholds to be programmed into DISPC */
	u32 fifo_low, fifo_high;

	/*
	 * True if overlay is to be enabled. Used to check and calculate configs
	 * for the overlay before it is enabled in the HW.
	 */
	bool enabling;
};
/* Per-manager private state, mirroring struct ovl_priv_data. */
struct mgr_priv_data {
	/* true if user_info has changes not yet moved to info by apply() */
	bool user_info_dirty;
	/* latest settings requested via set_info() */
	struct omap_overlay_manager_info user_info;

	/* true if info has changes not yet written to the HW registers */
	bool info_dirty;
	struct omap_overlay_manager_info info;

	/* true if shadow registers hold changes the HW has not taken in yet */
	bool shadow_info_dirty;

	/* If true, GO bit is up and shadow registers cannot be written.
	 * Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;

	/* dirty flags for settings kept outside of info (timings, lcd cfg) */
	bool extra_info_dirty;
	bool shadow_extra_info_dirty;

	struct omap_video_timings timings;
	struct dss_lcd_mgr_config lcd_config;
};
/* All driver-private state, indexed by overlay/manager id. */
static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	/* true while the vsync/framedone ISR is registered */
	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);
/* completed when no extra_info updates are pending anymore */
static DECLARE_COMPLETION(extra_updated_completion);

static void dss_register_vsync_isr(void);
  98. static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
  99. {
  100. return &dss_data.ovl_priv_data_array[ovl->id];
  101. }
  102. static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
  103. {
  104. return &dss_data.mgr_priv_data_array[mgr->id];
  105. }
  106. void dss_apply_init(void)
  107. {
  108. const int num_ovls = dss_feat_get_num_ovls();
  109. struct mgr_priv_data *mp;
  110. int i;
  111. spin_lock_init(&data_lock);
  112. for (i = 0; i < num_ovls; ++i) {
  113. struct ovl_priv_data *op;
  114. op = &dss_data.ovl_priv_data_array[i];
  115. op->info.global_alpha = 255;
  116. switch (i) {
  117. case 0:
  118. op->info.zorder = 0;
  119. break;
  120. case 1:
  121. op->info.zorder =
  122. dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
  123. break;
  124. case 2:
  125. op->info.zorder =
  126. dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
  127. break;
  128. case 3:
  129. op->info.zorder =
  130. dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
  131. break;
  132. }
  133. op->user_info = op->info;
  134. }
  135. /*
  136. * Initialize some of the lcd_config fields for TV manager, this lets
  137. * us prevent checking if the manager is LCD or TV at some places
  138. */
  139. mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];
  140. mp->lcd_config.video_port_width = 24;
  141. mp->lcd_config.clock_info.lck_div = 1;
  142. mp->lcd_config.clock_info.pck_div = 1;
  143. }
  144. /*
  145. * A LCD manager's stallmode decides whether it is in manual or auto update. TV
  146. * manager is always auto update, stallmode field for TV manager is false by
  147. * default
  148. */
  149. static bool ovl_manual_update(struct omap_overlay *ovl)
  150. {
  151. struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);
  152. return mp->lcd_config.stallmode;
  153. }
  154. static bool mgr_manual_update(struct omap_overlay_manager *mgr)
  155. {
  156. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  157. return mp->lcd_config.stallmode;
  158. }
/*
 * Validate the combined manager + overlay configuration.
 *
 * When @applying is true, a dirty user_info is validated instead of the
 * already-applied info, so an apply() can be rejected before committing.
 * Overlays that are neither enabled nor being enabled are passed as NULL.
 *
 * Returns 0 when the manager is disabled or the config is valid, otherwise
 * the error from dss_mgr_check().
 */
static int dss_check_settings_low(struct omap_overlay_manager *mgr,
		bool applying)
{
	struct omap_overlay_info *oi;
	struct omap_overlay_manager_info *mi;
	struct omap_overlay *ovl;
	struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	/* nothing to validate while the manager is off */
	if (!mp->enabled)
		return 0;

	if (applying && mp->user_info_dirty)
		mi = &mp->user_info;
	else
		mi = &mp->info;

	/* collect the infos to be tested into the array */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);

		if (!op->enabled && !op->enabling)
			oi = NULL;
		else if (applying && op->user_info_dirty)
			oi = &op->user_info;
		else
			oi = &op->info;

		ois[ovl->id] = oi;
	}

	return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
}
/*
 * check manager and overlay settings using overlay_info from data->info
 */
static int dss_check_settings(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, false);
}
/*
 * check manager and overlay settings using overlay_info from ovl->info if
 * dirty and from data->info otherwise
 */
static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
{
	return dss_check_settings_low(mgr, true);
}
/*
 * Decide whether the vsync/framedone ISR is still needed: true while any
 * enabled manager (or its overlays) has pending register writes, pending
 * GO bits, or — for manual update — an update in flight. All callers in
 * this file hold data_lock.
 */
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/* to write new values to registers */
			if (mp->info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_info_dirty)
				return true;

			/*
			 * NOTE: we don't check extra_info flags for disabled
			 * managers, once the manager is enabled, the extra_info
			 * related manager changes will be taken in by HW.
			 */

			/* to write new values to registers */
			if (mp->extra_info_dirty)
				return true;

			/* to set GO bit */
			if (mp->shadow_extra_info_dirty)
				return true;

			list_for_each_entry(ovl, &mgr->overlays, list) {
				struct ovl_priv_data *op;

				op = get_ovl_priv(ovl);

				/*
				 * NOTE: we check extra_info flags even for
				 * disabled overlays, as extra_infos need to be
				 * always written.
				 */

				/* to write new values to registers */
				if (op->extra_info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_extra_info_dirty)
					return true;

				if (!op->enabled)
					continue;

				/* to write new values to registers */
				if (op->info_dirty)
					return true;

				/* to set GO bit */
				if (op->shadow_info_dirty)
					return true;
			}
		}
	}

	return false;
}
  267. static bool need_go(struct omap_overlay_manager *mgr)
  268. {
  269. struct omap_overlay *ovl;
  270. struct mgr_priv_data *mp;
  271. struct ovl_priv_data *op;
  272. mp = get_mgr_priv(mgr);
  273. if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
  274. return true;
  275. list_for_each_entry(ovl, &mgr->overlays, list) {
  276. op = get_ovl_priv(ovl);
  277. if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
  278. return true;
  279. }
  280. return false;
  281. }
/* returns true if an extra_info field is currently being updated */
static bool extra_info_update_ongoing(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct omap_overlay *ovl;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		/* only managers that are enabled and actively updating can
		 * have extra_info changes in flight */
		if (!mp->enabled)
			continue;

		if (!mp->updating)
			continue;

		if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
			return true;

		list_for_each_entry(ovl, &mgr->overlays, list) {
			struct ovl_priv_data *op = get_ovl_priv(ovl);

			if (op->extra_info_dirty || op->shadow_extra_info_dirty)
				return true;
		}
	}

	return false;
}
/* wait until no extra_info updates are pending */
static void wait_pending_extra_info_updates(void)
{
	bool updating;
	unsigned long flags;
	unsigned long t;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	updating = extra_info_update_ongoing();

	/* fast path: nothing in flight, no need to sleep */
	if (!updating) {
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	/* re-arm the completion under the lock so the irq handler's
	 * complete_all() cannot be missed */
	init_completion(&extra_updated_completion);

	spin_unlock_irqrestore(&data_lock, flags);

	t = msecs_to_jiffies(500);
	r = wait_for_completion_timeout(&extra_updated_completion, t);
	if (r == 0)
		DSSWARN("timeout in wait_pending_extra_info_updates\n");
}
/*
 * Block until the manager's info has been written to the registers and the
 * GO bit has been taken in by the HW (both dirty flags cleared), waiting
 * for up to 4 vsyncs. Returns 0 on success/timeout-with-warning, or a
 * negative error from runtime PM or an interrupted wait.
 */
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	spin_lock_irqsave(&data_lock, flags);

	/* manual update displays have no GO bit semantics to wait for */
	if (mgr_manual_update(mgr)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		/* sample both flags atomically w.r.t. the irq handler */
		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->info_dirty;
		shadow_dirty = mp->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}
/*
 * Like dss_mgr_wait_for_go(), but waits on a single overlay's info_dirty /
 * shadow_info_dirty flags. A detached overlay (no manager) returns 0
 * immediately.
 */
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;
	u32 irq;
	unsigned long flags;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	mp = get_mgr_priv(ovl->manager);

	spin_lock_irqsave(&data_lock, flags);

	/* manual update displays have no GO bit semantics to wait for */
	if (ovl_manual_update(ovl)) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	if (!mp->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	r = dispc_runtime_get();
	if (r)
		return r;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		bool shadow_dirty, dirty;

		/* sample both flags atomically w.r.t. the irq handler */
		spin_lock_irqsave(&data_lock, flags);
		dirty = op->info_dirty;
		shadow_dirty = op->shadow_info_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	dispc_runtime_put();

	return r;
}
/*
 * Push a dirty overlay info into the DISPC registers. On setup failure the
 * overlay is force-disabled, since this may run from the vsync interrupt
 * where no error can be returned.
 */
static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct omap_overlay_info *oi;
	bool replication;
	struct mgr_priv_data *mp;
	int r;

	DSSDBG("writing ovl %d regs", ovl->id);

	if (!op->enabled || !op->info_dirty)
		return;

	oi = &op->info;

	mp = get_mgr_priv(ovl->manager);

	replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);

	r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
	if (r) {
		/*
		 * We can't do much here, as this function can be called from
		 * vsync interrupt.
		 */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);

		/* This will leave fifo configurations in a nonoptimal state */
		op->enabled = false;
		dispc_ovl_enable(ovl->id, false);
		return;
	}

	op->info_dirty = false;
	/* only track the shadow copy while the output is running */
	if (mp->updating)
		op->shadow_info_dirty = true;
}
/* Push a dirty extra_info (enable state, fifo thresholds) to DISPC. */
static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	struct mgr_priv_data *mp;

	DSSDBG("writing ovl %d regs extra", ovl->id);

	if (!op->extra_info_dirty)
		return;

	/* note: write also when op->enabled == false, so that the ovl gets
	 * disabled */

	dispc_ovl_enable(ovl->id, op->enabled);
	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	mp = get_mgr_priv(ovl->manager);

	op->extra_info_dirty = false;
	/* only track the shadow copy while the output is running */
	if (mp->updating)
		op->shadow_extra_info_dirty = true;
}
/*
 * Write all dirty settings of a manager and its overlays to DISPC.
 * Must not be called while the GO bit is up (mp->busy), as the shadow
 * registers cannot be written then.
 */
static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	struct omap_overlay *ovl;

	DSSDBG("writing mgr %d regs", mgr->id);

	if (!mp->enabled)
		return;

	WARN_ON(mp->busy);

	/* Commit overlay settings */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		dss_ovl_write_regs(ovl);
		dss_ovl_write_regs_extra(ovl);
	}

	if (mp->info_dirty) {
		dispc_mgr_setup(mgr->id, &mp->info);

		mp->info_dirty = false;
		/* only track the shadow copy while the output is running */
		if (mp->updating)
			mp->shadow_info_dirty = true;
	}
}
/* Push a manager's dirty extra_info (timings, lcd config) to DISPC. */
static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);

	DSSDBG("writing mgr %d regs extra", mgr->id);

	if (!mp->extra_info_dirty)
		return;

	dispc_mgr_set_timings(mgr->id, &mp->timings);

	/* lcd_config parameters */
	if (dss_mgr_is_lcd(mgr->id))
		dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);

	mp->extra_info_dirty = false;
	/* only track the shadow copy while the output is running */
	if (mp->updating)
		mp->shadow_extra_info_dirty = true;
}
/*
 * Write dirty settings for every auto-update manager that is enabled and
 * not busy (GO bit down). Invalid configurations are skipped with an error
 * instead of being written to HW.
 */
static void dss_write_regs(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		int r;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		r = dss_check_settings(mgr);
		if (r) {
			DSSERR("cannot write registers for manager %s: "
					"illegal configuration\n", mgr->name);
			continue;
		}

		dss_mgr_write_regs(mgr);
		dss_mgr_write_regs_extra(mgr);
	}
}
/*
 * Raise the GO bit for every auto-update manager with pending shadow
 * changes, marking it busy and making sure the ISR is registered to catch
 * the GO bit going back down.
 */
static void dss_set_go_bits(void)
{
	const int num_mgrs = omap_dss_get_num_overlay_managers();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
			continue;

		if (!need_go(mgr))
			continue;

		mp->busy = true;

		/* register the ISR before the GO bit so its fall is seen */
		if (!dss_data.irq_enabled && need_isr())
			dss_register_vsync_isr();

		dispc_mgr_go(mgr->id);
	}
}
  562. static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
  563. {
  564. struct omap_overlay *ovl;
  565. struct mgr_priv_data *mp;
  566. struct ovl_priv_data *op;
  567. mp = get_mgr_priv(mgr);
  568. mp->shadow_info_dirty = false;
  569. mp->shadow_extra_info_dirty = false;
  570. list_for_each_entry(ovl, &mgr->overlays, list) {
  571. op = get_ovl_priv(ovl);
  572. op->shadow_info_dirty = false;
  573. op->shadow_extra_info_dirty = false;
  574. }
  575. }
/*
 * Kick one frame out on a manual-update manager: validate, write all
 * registers, enable the output, and clear the shadow flags (no GO bit on
 * manual update displays).
 */
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&data_lock, flags);

	WARN_ON(mp->updating);

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("cannot start manual update: illegal configuration\n");
		spin_unlock_irqrestore(&data_lock, flags);
		return;
	}

	dss_mgr_write_regs(mgr);
	dss_mgr_write_regs_extra(mgr);

	mp->updating = true;

	/* ISR needed to catch FRAMEDONE for this update */
	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	dispc_mgr_enable_sync(mgr->id);

	mgr_clear_shadow_dirty(mgr);

	spin_unlock_irqrestore(&data_lock, flags);
}
  598. static void dss_apply_irq_handler(void *data, u32 mask);
  599. static void dss_register_vsync_isr(void)
  600. {
  601. const int num_mgrs = dss_feat_get_num_mgrs();
  602. u32 mask;
  603. int r, i;
  604. mask = 0;
  605. for (i = 0; i < num_mgrs; ++i)
  606. mask |= dispc_mgr_get_vsync_irq(i);
  607. for (i = 0; i < num_mgrs; ++i)
  608. mask |= dispc_mgr_get_framedone_irq(i);
  609. r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
  610. WARN_ON(r);
  611. dss_data.irq_enabled = true;
  612. }
  613. static void dss_unregister_vsync_isr(void)
  614. {
  615. const int num_mgrs = dss_feat_get_num_mgrs();
  616. u32 mask;
  617. int r, i;
  618. mask = 0;
  619. for (i = 0; i < num_mgrs; ++i)
  620. mask |= dispc_mgr_get_vsync_irq(i);
  621. for (i = 0; i < num_mgrs; ++i)
  622. mask |= dispc_mgr_get_framedone_irq(i);
  623. r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
  624. WARN_ON(r);
  625. dss_data.irq_enabled = false;
  626. }
/*
 * VSYNC/FRAMEDONE interrupt handler: refresh busy/updating state from HW,
 * clear shadow flags for managers whose GO bit just dropped, write any
 * newly-dirty registers and raise new GO bits, wake waiters on extra_info
 * completion, and unregister itself once no more work is pending.
 */
static void dss_apply_irq_handler(void *data, u32 mask)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	bool extra_updating;

	spin_lock(&data_lock);

	/* clear busy, updating flags, shadow_dirty flags */
	for (i = 0; i < num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		mp->updating = dispc_mgr_is_enabled(i);

		if (!mgr_manual_update(mgr)) {
			bool was_busy = mp->busy;
			mp->busy = dispc_mgr_go_busy(i);

			/* GO bit just went down: shadow regs are in HW now */
			if (was_busy && !mp->busy)
				mgr_clear_shadow_dirty(mgr);
		}
	}

	dss_write_regs();
	dss_set_go_bits();

	extra_updating = extra_info_update_ongoing();
	if (!extra_updating)
		complete_all(&extra_updated_completion);

	if (!need_isr())
		dss_unregister_vsync_isr();

	spin_unlock(&data_lock);
}
  658. static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
  659. {
  660. struct ovl_priv_data *op;
  661. op = get_ovl_priv(ovl);
  662. if (!op->user_info_dirty)
  663. return;
  664. op->user_info_dirty = false;
  665. op->info_dirty = true;
  666. op->info = op->user_info;
  667. }
  668. static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
  669. {
  670. struct mgr_priv_data *mp;
  671. mp = get_mgr_priv(mgr);
  672. if (!mp->user_info_dirty)
  673. return;
  674. mp->user_info_dirty = false;
  675. mp->info_dirty = true;
  676. mp->info = mp->user_info;
  677. }
/*
 * Apply all pending user settings of a manager and its overlays: validate
 * the would-be configuration first, then promote user_info -> info, write
 * registers and raise GO bits. Returns 0 on success or a negative error
 * if the configuration is rejected.
 */
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	unsigned long flags;
	struct omap_overlay *ovl;
	int r;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	spin_lock_irqsave(&data_lock, flags);

	/* validate before committing anything */
	r = dss_check_settings_apply(mgr);
	if (r) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("failed to apply settings: illegal configuration.\n");
		return r;
	}

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}
  701. static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
  702. {
  703. struct ovl_priv_data *op;
  704. op = get_ovl_priv(ovl);
  705. if (op->enabled == enable)
  706. return;
  707. op->enabled = enable;
  708. op->extra_info_dirty = true;
  709. }
  710. static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
  711. u32 fifo_low, u32 fifo_high)
  712. {
  713. struct ovl_priv_data *op = get_ovl_priv(ovl);
  714. if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
  715. return;
  716. op->fifo_low = fifo_low;
  717. op->fifo_high = fifo_high;
  718. op->extra_info_dirty = true;
  719. }
/*
 * Recompute and record fifo thresholds for an overlay that is enabled or
 * about to be enabled. use_fifo_merge is hard-coded off here.
 */
static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	u32 fifo_low, fifo_high;
	bool use_fifo_merge = false;

	if (!op->enabled && !op->enabling)
		return;

	dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
			use_fifo_merge, ovl_manual_update(ovl));

	dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
}
  731. static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
  732. {
  733. struct omap_overlay *ovl;
  734. struct mgr_priv_data *mp;
  735. mp = get_mgr_priv(mgr);
  736. if (!mp->enabled)
  737. return;
  738. list_for_each_entry(ovl, &mgr->overlays, list)
  739. dss_ovl_setup_fifo(ovl);
  740. }
  741. static void dss_setup_fifos(void)
  742. {
  743. const int num_mgrs = omap_dss_get_num_overlay_managers();
  744. struct omap_overlay_manager *mgr;
  745. int i;
  746. for (i = 0; i < num_mgrs; ++i) {
  747. mgr = omap_dss_get_overlay_manager(i);
  748. dss_mgr_setup_fifos(mgr);
  749. }
  750. }
/*
 * Enable a manager: validate its configuration, set up fifos, write
 * registers, and (for auto-update displays) start the output. Returns 0
 * on success or if already enabled; a negative error if the configuration
 * check fails (in which case the manager stays disabled).
 */
int dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (mp->enabled)
		goto out;

	spin_lock_irqsave(&data_lock, flags);

	/* set enabled first so dss_check_settings() validates this mgr */
	mp->enabled = true;

	r = dss_check_settings(mgr);
	if (r) {
		DSSERR("failed to enable manager %d: check_settings failed\n",
				mgr->id);
		goto err;
	}

	dss_setup_fifos();

	dss_write_regs();
	dss_set_go_bits();

	if (!mgr_manual_update(mgr))
		mp->updating = true;

	if (!dss_data.irq_enabled && need_isr())
		dss_register_vsync_isr();

	spin_unlock_irqrestore(&data_lock, flags);

	/* sync enable must be done outside the spinlock (can sleep) */
	if (!mgr_manual_update(mgr))
		dispc_mgr_enable_sync(mgr->id);

out:
	mutex_unlock(&apply_lock);

	return 0;

err:
	mp->enabled = false;
	spin_unlock_irqrestore(&data_lock, flags);
	mutex_unlock(&apply_lock);
	return r;
}
/*
 * Disable a manager: stop the output (auto-update only) and clear the
 * updating/enabled flags. No-op if already disabled.
 */
void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	mutex_lock(&apply_lock);

	if (!mp->enabled)
		goto out;

	/* sync disable must happen outside the spinlock (can sleep) */
	if (!mgr_manual_update(mgr))
		dispc_mgr_disable_sync(mgr->id);

	spin_lock_irqsave(&data_lock, flags);

	mp->updating = false;
	mp->enabled = false;

	spin_unlock_irqrestore(&data_lock, flags);

out:
	mutex_unlock(&apply_lock);
}
/*
 * Stage new manager settings into user_info after a basic sanity check.
 * The settings only reach the HW on a later apply(). Returns 0 or the
 * error from dss_mgr_simple_check().
 */
int dss_mgr_set_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;
	int r;

	r = dss_mgr_simple_check(mgr, info);
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	mp->user_info = *info;
	mp->user_info_dirty = true;

	spin_unlock_irqrestore(&data_lock, flags);

	return 0;
}
/* Copy the currently staged (user) manager settings into *info. */
void dss_mgr_get_info(struct omap_overlay_manager *mgr,
		struct omap_overlay_manager_info *info)
{
	struct mgr_priv_data *mp = get_mgr_priv(mgr);
	unsigned long flags;

	spin_lock_irqsave(&data_lock, flags);

	*info = mp->user_info;

	spin_unlock_irqrestore(&data_lock, flags);
}
/*
 * Connect an output to a manager. Fails with -EINVAL if the manager is
 * already connected or the output does not support this manager.
 */
int dss_mgr_set_output(struct omap_overlay_manager *mgr,
		struct omap_dss_output *output)
{
	int r;

	mutex_lock(&apply_lock);

	if (mgr->output) {
		DSSERR("manager %s is already connected to an output\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	if ((mgr->supported_outputs & output->id) == 0) {
		DSSERR("output does not support manager %s\n",
			mgr->name);
		r = -EINVAL;
		goto err;
	}

	/* link both directions */
	output->manager = mgr;
	mgr->output = output;

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}
  851. int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
  852. {
  853. int r;
  854. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  855. unsigned long flags;
  856. mutex_lock(&apply_lock);
  857. if (!mgr->output) {
  858. DSSERR("failed to unset output, output not set\n");
  859. r = -EINVAL;
  860. goto err;
  861. }
  862. spin_lock_irqsave(&data_lock, flags);
  863. if (mp->enabled) {
  864. DSSERR("output can't be unset when manager is enabled\n");
  865. r = -EINVAL;
  866. goto err1;
  867. }
  868. spin_unlock_irqrestore(&data_lock, flags);
  869. mgr->output->manager = NULL;
  870. mgr->output = NULL;
  871. mutex_unlock(&apply_lock);
  872. return 0;
  873. err1:
  874. spin_unlock_irqrestore(&data_lock, flags);
  875. err:
  876. mutex_unlock(&apply_lock);
  877. return r;
  878. }
  879. static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
  880. const struct omap_video_timings *timings)
  881. {
  882. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  883. mp->timings = *timings;
  884. mp->extra_info_dirty = true;
  885. }
  886. void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
  887. const struct omap_video_timings *timings)
  888. {
  889. unsigned long flags;
  890. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  891. spin_lock_irqsave(&data_lock, flags);
  892. if (mp->updating) {
  893. DSSERR("cannot set timings for %s: manager needs to be disabled\n",
  894. mgr->name);
  895. goto out;
  896. }
  897. dss_apply_mgr_timings(mgr, timings);
  898. out:
  899. spin_unlock_irqrestore(&data_lock, flags);
  900. }
  901. static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
  902. const struct dss_lcd_mgr_config *config)
  903. {
  904. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  905. mp->lcd_config = *config;
  906. mp->extra_info_dirty = true;
  907. }
  908. void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
  909. const struct dss_lcd_mgr_config *config)
  910. {
  911. unsigned long flags;
  912. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  913. spin_lock_irqsave(&data_lock, flags);
  914. if (mp->enabled) {
  915. DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
  916. mgr->name);
  917. goto out;
  918. }
  919. dss_apply_mgr_lcd_config(mgr, config);
  920. out:
  921. spin_unlock_irqrestore(&data_lock, flags);
  922. }
  923. int dss_ovl_set_info(struct omap_overlay *ovl,
  924. struct omap_overlay_info *info)
  925. {
  926. struct ovl_priv_data *op = get_ovl_priv(ovl);
  927. unsigned long flags;
  928. int r;
  929. r = dss_ovl_simple_check(ovl, info);
  930. if (r)
  931. return r;
  932. spin_lock_irqsave(&data_lock, flags);
  933. op->user_info = *info;
  934. op->user_info_dirty = true;
  935. spin_unlock_irqrestore(&data_lock, flags);
  936. return 0;
  937. }
  938. void dss_ovl_get_info(struct omap_overlay *ovl,
  939. struct omap_overlay_info *info)
  940. {
  941. struct ovl_priv_data *op = get_ovl_priv(ovl);
  942. unsigned long flags;
  943. spin_lock_irqsave(&data_lock, flags);
  944. *info = op->user_info;
  945. spin_unlock_irqrestore(&data_lock, flags);
  946. }
/*
 * dss_ovl_set_manager() - attach overlay @ovl to manager @mgr.
 *
 * The overlay must not already have a manager and must be disabled
 * (checked under data_lock). On success the overlay's DISPC channel-out
 * is programmed to the manager's id and the overlay is added to the
 * manager's overlay list.
 *
 * Lock/PM ordering: apply_lock -> dispc_runtime_get() -> data_lock.
 * The runtime reference is held across dispc_ovl_set_channel_out()
 * because that call touches DISPC registers.
 *
 * Returns 0 on success, -EINVAL on misuse, or the error from
 * dispc_runtime_get().
 */
int dss_ovl_set_manager(struct omap_overlay *ovl,
		struct omap_overlay_manager *mgr)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	if (!mgr)
		return -EINVAL;

	mutex_lock(&apply_lock);

	if (ovl->manager) {
		DSSERR("overlay '%s' already has a manager '%s'\n",
			ovl->name, ovl->manager->name);
		r = -EINVAL;
		goto err;
	}

	/* power up DISPC before touching its registers below */
	r = dispc_runtime_get();
	if (r)
		goto err;

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to change the manager\n");
		r = -EINVAL;
		goto err1;
	}

	/* route the overlay's output to the new manager in hardware */
	dispc_ovl_set_channel_out(ovl->id, mgr->id);

	ovl->manager = mgr;
	list_add_tail(&ovl->list, &mgr->overlays);

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	mutex_unlock(&apply_lock);

	return 0;

err1:
	dispc_runtime_put();
err:
	mutex_unlock(&apply_lock);
	return r;
}
/*
 * dss_ovl_unset_manager() - detach overlay @ovl from its manager.
 *
 * Requires a manager to be set and the overlay to be disabled. After
 * the disabled check, any pending extra_info updates are flushed so the
 * overlay is really disabled in hardware before the link is broken.
 * data_lock is dropped before the (sleeping) wait and re-taken for the
 * final check and list removal; apply_lock is held throughout.
 *
 * Returns 0 on success, -EINVAL on misuse or if a manual-update
 * manager still needs an update to flush the overlay's disable.
 */
int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (!ovl->manager) {
		DSSERR("failed to detach overlay: manager not set\n");
		r = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&data_lock, flags);

	if (op->enabled) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("overlay has to be disabled to unset the manager\n");
		r = -EINVAL;
		goto err;
	}

	spin_unlock_irqrestore(&data_lock, flags);

	/* wait for pending extra_info updates to ensure the ovl is disabled */
	wait_pending_extra_info_updates();

	/*
	 * For a manual update display, there is no guarantee that the overlay
	 * is really disabled in HW, we may need an extra update from this
	 * manager before the configurations can go in. Return an error if the
	 * overlay needed an update from the manager.
	 *
	 * TODO: Instead of returning an error, try to do a dummy manager update
	 * here to disable the overlay in hardware. Use the *GATED fields in
	 * the DISPC_CONFIG registers to do a dummy update.
	 */
	spin_lock_irqsave(&data_lock, flags);

	if (ovl_manual_update(ovl) && op->extra_info_dirty) {
		spin_unlock_irqrestore(&data_lock, flags);
		DSSERR("need an update to change the manager\n");
		r = -EINVAL;
		goto err;
	}

	ovl->manager = NULL;
	list_del(&ovl->list);

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err:
	mutex_unlock(&apply_lock);
	return r;
}
  1032. bool dss_ovl_is_enabled(struct omap_overlay *ovl)
  1033. {
  1034. struct ovl_priv_data *op = get_ovl_priv(ovl);
  1035. unsigned long flags;
  1036. bool e;
  1037. spin_lock_irqsave(&data_lock, flags);
  1038. e = op->enabled;
  1039. spin_unlock_irqrestore(&data_lock, flags);
  1040. return e;
  1041. }
/*
 * dss_ovl_enable() - enable an overlay and apply it to hardware.
 *
 * No-op (returns 0) if already enabled. Requires a manager with an
 * output attached. Under data_lock: op->enabling is set so that
 * dss_check_settings()/dss_setup_fifos() account for this overlay as if
 * enabled, then the enable is applied and the shadow registers are
 * written and committed via the GO bits.
 *
 * Returns 0 on success, -EINVAL if there is no manager/output, or the
 * error from dss_check_settings().
 */
int dss_ovl_enable(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op = get_ovl_priv(ovl);
	unsigned long flags;
	int r;

	mutex_lock(&apply_lock);

	if (op->enabled) {
		/* already enabled: success, nothing to do */
		r = 0;
		goto err1;
	}

	if (ovl->manager == NULL || ovl->manager->output == NULL) {
		r = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&data_lock, flags);

	/* make the pre-enable checks treat this overlay as enabled */
	op->enabling = true;

	r = dss_check_settings(ovl->manager);
	if (r) {
		DSSERR("failed to enable overlay %d: check_settings failed\n",
			ovl->id);
		goto err2;
	}

	dss_setup_fifos();

	op->enabling = false;
	dss_apply_ovl_enable(ovl, true);

	dss_write_regs();
	dss_set_go_bits();

	spin_unlock_irqrestore(&data_lock, flags);

	mutex_unlock(&apply_lock);

	return 0;
err2:
	/* must clear the transient flag on the failure path too */
	op->enabling = false;
	spin_unlock_irqrestore(&data_lock, flags);
err1:
	mutex_unlock(&apply_lock);
	return r;
}
  1079. int dss_ovl_disable(struct omap_overlay *ovl)
  1080. {
  1081. struct ovl_priv_data *op = get_ovl_priv(ovl);
  1082. unsigned long flags;
  1083. int r;
  1084. mutex_lock(&apply_lock);
  1085. if (!op->enabled) {
  1086. r = 0;
  1087. goto err;
  1088. }
  1089. if (ovl->manager == NULL || ovl->manager->output == NULL) {
  1090. r = -EINVAL;
  1091. goto err;
  1092. }
  1093. spin_lock_irqsave(&data_lock, flags);
  1094. dss_apply_ovl_enable(ovl, false);
  1095. dss_write_regs();
  1096. dss_set_go_bits();
  1097. spin_unlock_irqrestore(&data_lock, flags);
  1098. mutex_unlock(&apply_lock);
  1099. return 0;
  1100. err:
  1101. mutex_unlock(&apply_lock);
  1102. return r;
  1103. }