apply.c

/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |     dss_cache      |
 * +--------------------+
 *          v
 *      configure()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */

struct overlay_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	bool enabled;

	struct omap_overlay_info info;

	enum omap_channel channel;

	u32 fifo_low;
	u32 fifo_high;
};

struct manager_cache_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	struct omap_overlay_manager_info info;

	bool manual_update;
	bool do_manual_update;
};

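/* SW cache of the overlay and manager settings; all accesses are protected
 * by dss_cache.lock. */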
static struct {
	spinlock_t lock;
	struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
	struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];

	bool irq_enabled;
} dss_cache;

void dss_apply_init(void)
{
	spin_lock_init(&dss_cache.lock);
}

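/* A display with the MANUAL_UPDATE cap (typically a DSI command mode or DBI
 * panel) is refreshed explicitly via dss_mgr_start_update() instead of on
 * every VSYNC. */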
static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static int overlay_enabled(struct omap_overlay *ovl)
{
	return ovl->info.enabled && ovl->manager && ovl->manager->device;
}

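/* Wait until the settings cached for this manager have reached the real
 * registers, i.e. both dirty and shadow_dirty have been cleared. Returns 0
 * on success (or if there is nothing to wait for), -ERESTARTSYS if the wait
 * was interrupted, or the error from the DISPC irq wait on timeout. */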
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct manager_cache_data *mc;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (mgr_manual_update(mgr))
		return 0;

	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
			|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
	} else {
		irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
			DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
	}

	mc = &dss_cache.manager_cache[mgr->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = mc->dirty;
		shadow_dirty = mc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}

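/* Like dss_mgr_wait_for_go(), but waits on a single overlay's cache entry. */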
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	if (ovl_manual_update(ovl))
		return 0;

	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
			|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
	} else {
		irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
			DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
	}

	oc = &dss_cache.overlay_cache[ovl->id];
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		spin_lock_irqsave(&dss_cache.lock, flags);
		dirty = oc->dirty;
		shadow_dirty = oc->shadow_dirty;
		spin_unlock_irqrestore(&dss_cache.lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}

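/* Write the cached configuration of one overlay to the shadow registers. A
 * disabled overlay is simply turned off; otherwise the output channel, the
 * overlay parameters and the FIFO thresholds are programmed and the overlay
 * is enabled. */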
static int configure_overlay(enum omap_plane plane)
{
	struct omap_overlay *ovl;
	struct overlay_cache_data *c;
	struct omap_overlay_info *oi;
	bool ilace, replication;
	int r;

	DSSDBGF("%d", plane);

	c = &dss_cache.overlay_cache[plane];
	oi = &c->info;

	if (!c->enabled) {
		dispc_ovl_enable(plane, 0);
		return 0;
	}

	ovl = omap_dss_get_overlay(plane);

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(plane, c->channel);

	r = dispc_ovl_setup(plane, oi, ilace, replication);
	if (r) {
		/* this shouldn't happen */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
		dispc_ovl_enable(plane, 0);
		return r;
	}

	dispc_ovl_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);

	dispc_ovl_enable(plane, 1);

	return 0;
}

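/* Write the cached manager configuration to the shadow registers. */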
static void configure_manager(enum omap_channel channel)
{
	struct omap_overlay_manager_info *mi;

	DSSDBGF("%d", channel);

	/* picking info from the cache */
	mi = &dss_cache.manager_cache[channel].info;

	dispc_mgr_setup(channel, mi);
}

/* configure_dispc() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers. */
static int configure_dispc(void)
{
	struct overlay_cache_data *oc;
	struct manager_cache_data *mc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	int r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	bool mgr_go[MAX_DSS_MANAGERS];
	bool busy;

	r = 0;
	busy = false;

	for (i = 0; i < num_mgrs; i++) {
		mgr_busy[i] = dispc_mgr_go_busy(i);
		mgr_go[i] = false;
	}

	/* Commit overlay settings */
	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		mc = &dss_cache.manager_cache[oc->channel];

		if (!oc->dirty)
			continue;

		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[oc->channel]) {
			busy = true;
			continue;
		}

		r = configure_overlay(i);
		if (r)
			DSSERR("configure_overlay %d failed\n", i);

		oc->dirty = false;
		oc->shadow_dirty = true;
		mgr_go[oc->channel] = true;
	}

	/* Commit manager settings */
	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];

		if (!mc->dirty)
			continue;

		if (mc->manual_update && !mc->do_manual_update)
			continue;

		if (mgr_busy[i]) {
			busy = true;
			continue;
		}

		configure_manager(i);
		mc->dirty = false;
		mc->shadow_dirty = true;
		mgr_go[i] = true;
	}

	/* set GO */
	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];

		if (!mgr_go[i])
			continue;

		/* We don't need GO with a manual update display. The LCD iface
		 * will always be turned off after the frame, and the new
		 * settings will be taken into use at the next update */
		if (!mc->manual_update)
			dispc_mgr_go(i);
	}

	if (busy)
		r = 1;
	else
		r = 0;

	return r;
}

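/* Start a frame on a manual update display: do_manual_update makes
 * configure_dispc() pick up entries that are otherwise skipped for manual
 * update managers, the shadow_dirty flags of this manager and its overlays
 * are cleared, and the manager is enabled to send out the frame. */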
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
	struct manager_cache_data *mc;
	struct overlay_cache_data *oc;
	const int num_ovls = dss_feat_get_num_ovls();
	int i;

	mc = &dss_cache.manager_cache[mgr->id];

	mc->do_manual_update = true;
	configure_dispc();
	mc->do_manual_update = false;

	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		if (oc->channel != mgr->id)
			continue;

		oc->shadow_dirty = false;
	}

	mc = &dss_cache.manager_cache[mgr->id];
	mc->shadow_dirty = false;

	dispc_mgr_enable(mgr->id, true);
}

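/* VSYNC/EVSYNC handler: clear shadow_dirty for the managers (and their
 * overlays) whose GO has completed, try to flush any remaining dirty cache
 * entries, and unregister itself once nothing is pending and no manager is
 * busy. */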
static void dss_apply_irq_handler(void *data, u32 mask)
{
	struct manager_cache_data *mc;
	struct overlay_cache_data *oc;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i, r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	u32 irq_mask;

	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	spin_lock(&dss_cache.lock);

	for (i = 0; i < num_ovls; ++i) {
		oc = &dss_cache.overlay_cache[i];
		if (!mgr_busy[oc->channel])
			oc->shadow_dirty = false;
	}

	for (i = 0; i < num_mgrs; ++i) {
		mc = &dss_cache.manager_cache[i];
		if (!mgr_busy[i])
			mc->shadow_dirty = false;
	}

	r = configure_dispc();
	if (r == 1)
		goto end;

	/* re-read busy flags */
	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_mgr_go_busy(i);

	/* keep running as long as there are busy managers, so that
	 * we can collect overlay-applied information */
	for (i = 0; i < num_mgrs; ++i) {
		if (mgr_busy[i])
			goto end;
	}

	irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
			DISPC_IRQ_EVSYNC_EVEN;
	if (dss_has_feature(FEAT_MGR_LCD2))
		irq_mask |= DISPC_IRQ_VSYNC2;

	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
	dss_cache.irq_enabled = false;

end:
	spin_unlock(&dss_cache.lock);
}

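/* Copy a dirty overlay's info into the overlay cache. Returns -EINVAL and
 * disables the cached overlay if the new configuration fails
 * dss_check_overlay(). */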
static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;

	oc = &dss_cache.overlay_cache[ovl->id];

	if (ovl->manager_changed) {
		ovl->manager_changed = false;
		ovl->info_dirty = true;
	}

	if (!overlay_enabled(ovl)) {
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return 0;
	}

	if (!ovl->info_dirty)
		return 0;

	dssdev = ovl->manager->device;

	if (dss_check_overlay(ovl, dssdev)) {
		if (oc->enabled) {
			oc->enabled = false;
			oc->dirty = true;
		}
		return -EINVAL;
	}

	ovl->info_dirty = false;
	oc->dirty = true;
	oc->info = ovl->info;

	oc->channel = ovl->manager->id;

	oc->enabled = true;

	return 0;
}

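/* Copy a dirty manager's info into the manager cache. */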
static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
	struct manager_cache_data *mc;

	mc = &dss_cache.manager_cache[mgr->id];

	if (mgr->device_changed) {
		mgr->device_changed = false;
		mgr->info_dirty = true;
	}

	if (!mgr->info_dirty)
		return;

	if (!mgr->device)
		return;

	mgr->info_dirty = false;
	mc->dirty = true;
	mc->info = mgr->info;

	mc->manual_update = mgr_manual_update(mgr);
}

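/* Compute the FIFO low/high thresholds for an enabled overlay from the FIFO
 * size, the burst size and the type of display the overlay feeds. */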
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct overlay_cache_data *oc;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	oc = &dss_cache.overlay_cache[ovl->id];

	if (!oc->enabled)
		return;

	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &oc->fifo_low,
				&oc->fifo_high);
		break;
#endif
	default:
		BUG();
	}
}

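/* Take the current overlay and manager configuration into use: update the SW
 * caches under dss_cache.lock and, for an enabled auto-update manager, make
 * sure the apply irq handler is registered before flushing the caches with
 * configure_dispc(). */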
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int i, r;
	unsigned long flags;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&dss_cache.lock, flags);

	/* Configure overlays */
	for (i = 0; i < mgr->num_overlays; ++i) {
		struct omap_overlay *ovl;

		ovl = mgr->overlays[i];

		if (ovl->manager != mgr)
			continue;

		omap_dss_mgr_apply_ovl(ovl);
	}

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	for (i = 0; i < mgr->num_overlays; ++i) {
		struct omap_overlay *ovl;

		ovl = mgr->overlays[i];

		if (ovl->manager != mgr)
			continue;

		omap_dss_mgr_apply_ovl_fifos(ovl);
	}

	r = 0;
	if (mgr->enabled && !mgr_manual_update(mgr)) {
		if (!dss_cache.irq_enabled) {
			u32 mask;

			mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
				DISPC_IRQ_EVSYNC_EVEN;
			if (dss_has_feature(FEAT_MGR_LCD2))
				mask |= DISPC_IRQ_VSYNC2;

			r = omap_dispc_register_isr(dss_apply_irq_handler,
					NULL, mask);

			if (r)
				DSSERR("failed to register apply isr\n");

			dss_cache.irq_enabled = true;
		}

		configure_dispc();
	}

	spin_unlock_irqrestore(&dss_cache.lock, flags);

	dispc_runtime_put();

	return r;
}

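/* Enable/disable the manager output and record the state in the manager. */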
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, true);
	mgr->enabled = true;
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, false);
	mgr->enabled = false;
}