apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"
/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |     dss_cache      |
 * +--------------------+
 *          v
 *      configure()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
struct overlay_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	bool enabled;

	struct omap_overlay_info info;

	enum omap_channel channel;

	u32 fifo_low;
	u32 fifo_high;
};

struct manager_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_manager_info info;

	bool manual_update;
	bool do_manual_update;
};

static struct {
	spinlock_t lock;

	struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
	struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_cache;
void dss_apply_init(void)
{
	spin_lock_init(&dss_cache.lock);
}

static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static int overlay_enabled(struct omap_overlay *ovl)
{
	return ovl->info.enabled && ovl->manager && ovl->manager->device;
}
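
/*
 * Wait until the changes applied for this manager have reached the real
 * registers, i.e. until both the cache and the shadow registers are clean,
 * or until the 500 ms timeout expires. No-op for inactive displays and for
 * manual update displays.
 */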
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct manager_cache_data *mc;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (mgr_manual_update(mgr))
		return 0;

	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
			|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
	} else {
		irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
			DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
	}

	mc = &dss_cache.manager_cache[mgr->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = mc->dirty;
		shadow_dirty = mc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}
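
/*
 * Like dss_mgr_wait_for_go(), but waits for a single overlay's cached and
 * shadow settings to reach the real registers.
 */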
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (ovl_manual_update(ovl))
		return 0;

	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
			|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
	} else {
		irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
			DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
	}

	oc = &dss_cache.overlay_cache[ovl->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = oc->dirty;
		shadow_dirty = oc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}
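
/*
 * Write the cached settings of one overlay to the (shadow) DISPC registers.
 * Disables the plane if the cache says it should not be enabled, or if
 * dispc_ovl_setup() fails.
 */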
static int configure_overlay(enum omap_plane plane)
{
	struct omap_overlay *ovl;
	struct overlay_cache_data *c;
	struct omap_overlay_info *oi;
	bool ilace, replication;
	int r;

	DSSDBGF("%d", plane);

	c = &dss_cache.overlay_cache[plane];
	oi = &c->info;

	if (!c->enabled) {
		dispc_ovl_enable(plane, 0);
		return 0;
	}

	ovl = omap_dss_get_overlay(plane);

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(plane, c->channel);

	r = dispc_ovl_setup(plane, oi, ilace, replication);
	if (r) {
		/* this shouldn't happen */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
		dispc_ovl_enable(plane, 0);
		return r;
	}

	dispc_ovl_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);

	dispc_ovl_enable(plane, 1);

	return 0;
}
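
/* Write the cached settings of one manager to the (shadow) DISPC registers */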
static void configure_manager(enum omap_channel channel)
{
	struct omap_overlay_manager_info *mi;

	DSSDBGF("%d", channel);

	/* picking info from the cache */
	mi = &dss_cache.manager_cache[channel].info;

	dispc_mgr_setup(channel, mi);
}
/* configure_dispc() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers. */
static int configure_dispc(void)
{
	struct overlay_cache_data *oc;
	struct manager_cache_data *mc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	int r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	bool mgr_go[MAX_DSS_MANAGERS];
	bool busy;

	r = 0;
	busy = false;

	for (i = 0; i < num_mgrs; i++) {
		mgr_busy[i] = dispc_mgr_go_busy(i);
		mgr_go[i] = false;
	}

	/* Commit overlay settings */
	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		mc = &dss_cache.manager_cache[oc->channel];

		if (!oc->dirty)
			continue;

		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[oc->channel]) {
			busy = true;
			continue;
		}

		r = configure_overlay(i);
		if (r)
			DSSERR("configure_overlay %d failed\n", i);

		oc->dirty = false;
		oc->shadow_dirty = true;
		mgr_go[oc->channel] = true;
	}

	/* Commit manager settings */
	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];

		if (!mc->dirty)
			continue;

		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[i]) {
			busy = true;
			continue;
		}

		configure_manager(i);
		mc->dirty = false;
		mc->shadow_dirty = true;
		mgr_go[i] = true;
	}

	/* set GO */
	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];

		if (!mgr_go[i])
			continue;

		/* We don't need GO with manual update display. LCD iface will
		 * always be turned off after frame, and new settings will be
		 * taken in to use at next update */
		if (!mc->manual_update)
			dispc_mgr_go(i);
	}

	if (busy)
		r = 1;
	else
		r = 0;

	return r;
}
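
/*
 * Start a frame update on a manual update manager: force the cached settings
 * into the shadow registers, mark them as taken into use, and enable the
 * manager output to push the frame out.
 */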
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct manager_cache_data *mc;
	struct overlay_cache_data *oc;
	struct omap_overlay *ovl;

	mc = &dss_cache.manager_cache[mgr->id];

	mc->do_manual_update = true;
	configure_dispc();
	mc->do_manual_update = false;

	list_for_each_entry(ovl, &mgr->overlays, list) {
		oc = &dss_cache.overlay_cache[ovl->id];
		oc->shadow_dirty = false;
	}

	mc = &dss_cache.manager_cache[mgr->id];
	mc->shadow_dirty = false;

	dispc_mgr_enable(mgr->id, true);
}
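
/*
 * VSYNC/EVSYNC interrupt handler: clears shadow_dirty flags for managers
 * whose GO bit is no longer busy, retries configure_dispc() for anything
 * still pending, and unregisters itself once all caches are clean and no
 * manager is busy.
 */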
static void dss_apply_irq_handler(void *data, u32 mask)
{
	struct manager_cache_data *mc;
	struct overlay_cache_data *oc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i, r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	u32 irq_mask;

	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	spin_lock(&dss_cache.lock);

	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		if (!mgr_busy[oc->channel])
			oc->shadow_dirty = false;
	}

	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];
		if (!mgr_busy[i])
			mc->shadow_dirty = false;
	}

	r = configure_dispc();
	if (r == 1)
		goto end;

	/* re-read busy flags */
	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	/* keep running as long as there are busy managers, so that
	 * we can collect overlay-applied information */
	for (i = 0; i < num_mgrs; ++i) {
		if (mgr_busy[i])
			goto end;
	}

	irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
			DISPC_IRQ_EVSYNC_EVEN;
	if (dss_has_feature(FEAT_MGR_LCD2))
		irq_mask |= DISPC_IRQ_VSYNC2;

	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
	dss_cache.irq_enabled = false;

end:
	spin_unlock(&dss_cache.lock);
}
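
/*
 * Copy a changed overlay's user-visible info into the overlay cache and mark
 * the cache entry dirty. Disables the cached overlay if it is no longer
 * enabled or if the new config fails dss_check_overlay().
 */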
static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;

	oc = &dss_cache.overlay_cache[ovl->id];

	if (ovl->manager_changed) {
		ovl->manager_changed = false;
		ovl->info_dirty = true;
	}

	if (!overlay_enabled(ovl)) {
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return 0;
	}

	if (!ovl->info_dirty)
		return 0;

	dssdev = ovl->manager->device;

	if (dss_check_overlay(ovl, dssdev)) {
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return -EINVAL;
	}

	ovl->info_dirty = false;
	oc->dirty = true;
	oc->info = ovl->info;

	oc->channel = ovl->manager->id;

	oc->enabled = true;

	return 0;
}
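
/*
 * Copy a changed manager's user-visible info into the manager cache and mark
 * the cache entry dirty.
 */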
static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct manager_cache_data *mc;

	mc = &dss_cache.manager_cache[mgr->id];

	if (mgr->device_changed) {
		mgr->device_changed = false;
		mgr->info_dirty = true;
	}

	if (!mgr->info_dirty)
		return;

	if (!mgr->device)
		return;

	mgr->info_dirty = false;
	mc->dirty = true;
	mc->info = mgr->info;

	mc->manual_update = mgr_manual_update(mgr);
}
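
/*
 * Calculate and cache the FIFO low/high thresholds for an enabled overlay,
 * based on the overlay's FIFO size, burst size and the display type it is
 * driving.
 */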
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	oc = &dss_cache.overlay_cache[ovl->id];

	if (!oc->enabled)
		return;

	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#endif
	default:
		BUG();
	}
}
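
/*
 * Apply the user-configured overlay and manager settings for a manager:
 * update the SW cache, and for enabled auto-update displays register the
 * apply ISR (if not already registered) and try to push the cache to the
 * shadow registers right away.
 */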
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&dss_cache.lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl_fifos(ovl);

	r = 0;
	if (mgr->enabled && !mgr_manual_update(mgr)) {
		if (!dss_cache.irq_enabled) {
			u32 mask;

			mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
				DISPC_IRQ_EVSYNC_EVEN;
			if (dss_has_feature(FEAT_MGR_LCD2))
				mask |= DISPC_IRQ_VSYNC2;

			r = omap_dispc_register_isr(dss_apply_irq_handler,
					NULL, mask);
			if (r)
				DSSERR("failed to register apply isr\n");

			dss_cache.irq_enabled = true;
		}

		configure_dispc();
	}

	spin_unlock_irqrestore(&dss_cache.lock, flags);

	dispc_runtime_put();

	return r;
}
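
/* Enable/disable the manager output and record the state in the manager */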
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, true);
	mgr->enabled = true;
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, false);
	mgr->enabled = false;
}