/* apply.c */
/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
  17. #define DSS_SUBSYS_NAME "APPLY"
  18. #include <linux/kernel.h>
  19. #include <linux/slab.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/jiffies.h>
  22. #include <video/omapdss.h>
  23. #include "dss.h"
  24. #include "dss_features.h"
/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *       apply()
 *          v
 * +--------------------+
 * |        info        |
 * +--------------------+
 *          v
 *     write_regs()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */
/* Per-overlay SW cache entry (the "info" level of the cache diagram). */
struct ovl_priv_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	/* Cached enable state; when false, dss_ovl_write_regs() disables
	 * the overlay in HW. */
	bool enabled;

	/* Snapshot of omap_overlay::info taken in apply(). */
	struct omap_overlay_info info;

	/* Manager this overlay outputs to; copied from ovl->manager->id in
	 * apply() and used to index the per-manager busy/GO bookkeeping. */
	enum omap_channel channel;

	/* FIFO thresholds computed in omap_dss_mgr_apply_ovl_fifos() and
	 * programmed via dispc_ovl_set_fifo_threshold(). */
	u32 fifo_low;
	u32 fifo_high;
};
/* Per-manager SW cache entry (the "info" level of the cache diagram). */
struct mgr_priv_data {
	/* If true, cache changed, but not written to shadow registers. Set
	 * in apply(), cleared when registers written. */
	bool dirty;
	/* If true, shadow registers contain changed values not yet in real
	 * registers. Set when writing to shadow registers, cleared at
	 * VSYNC/EVSYNC */
	bool shadow_dirty;

	/* Snapshot of omap_overlay_manager::info taken in apply(). */
	struct omap_overlay_manager_info info;

	/* True when the attached display is a manual-update one (see
	 * mgr_manual_update()); such managers skip the GO mechanism. */
	bool manual_update;
	/* Set only for the duration of dss_mgr_start_update(), allowing
	 * dss_write_regs() to commit a manual-update manager's settings. */
	bool do_manual_update;
};
/* SW cache of dispc settings: one slot per overlay and per manager. */
static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	/* True while the vsync ISR is registered (dss_register_vsync_isr()). */
	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
  84. static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
  85. {
  86. return &dss_data.ovl_priv_data_array[ovl->id];
  87. }
  88. static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
  89. {
  90. return &dss_data.mgr_priv_data_array[mgr->id];
  91. }
/* One-time initialization: set up the lock that protects dss_data. */
void dss_apply_init(void)
{
	spin_lock_init(&data_lock);
}
  96. static bool ovl_manual_update(struct omap_overlay *ovl)
  97. {
  98. return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
  99. }
  100. static bool mgr_manual_update(struct omap_overlay_manager *mgr)
  101. {
  102. return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
  103. }
  104. static int overlay_enabled(struct omap_overlay *ovl)
  105. {
  106. return ovl->info.enabled && ovl->manager && ovl->manager->device;
  107. }
/*
 * Wait until the manager's cached settings have propagated to the real
 * registers, i.e. until both dirty and shadow_dirty are clear.  Returns 0
 * on success or when there is nothing to wait for, -ERESTARTSYS if the
 * wait was interrupted, or the irq-wait error on timeout.
 */
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct mgr_priv_data *mp;
	u32 irq;
	int r;
	int i;
	struct omap_dss_device *dssdev = mgr->device;

	/* No active display: nothing can be pending. */
	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	/* Manual-update displays take settings into use at the next update,
	 * not at VSYNC, so there is no GO to wait for. */
	if (mgr_manual_update(mgr))
		return 0;

	irq = dispc_mgr_get_vsync_irq(mgr->id);

	mp = get_mgr_priv(mgr);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		/* Snapshot the flags under data_lock. */
		spin_lock_irqsave(&data_lock, flags);
		dirty = mp->dirty;
		shadow_dirty = mp->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
					mgr->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
			break;
		}
	}

	return r;
}
/*
 * Like dss_mgr_wait_for_go(), but waits on a single overlay's dirty and
 * shadow_dirty flags.  Returns 0 on success or when there is nothing to
 * wait for, -ERESTARTSYS if interrupted, or the irq-wait error on timeout.
 */
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
	unsigned long timeout = msecs_to_jiffies(500);
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 irq;
	int r;
	int i;

	/* An unconnected overlay has nothing pending. */
	if (!ovl->manager)
		return 0;

	dssdev = ovl->manager->device;

	if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
		return 0;

	/* Manual-update displays take settings into use at the next update,
	 * not at VSYNC, so there is no GO to wait for. */
	if (ovl_manual_update(ovl))
		return 0;

	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);

	op = get_ovl_priv(ovl);
	i = 0;
	while (1) {
		unsigned long flags;
		bool shadow_dirty, dirty;

		/* Snapshot the flags under data_lock. */
		spin_lock_irqsave(&data_lock, flags);
		dirty = op->dirty;
		shadow_dirty = op->shadow_dirty;
		spin_unlock_irqrestore(&data_lock, flags);

		if (!dirty && !shadow_dirty) {
			r = 0;
			break;
		}

		/* 4 iterations is the worst case:
		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
		 * 2 - first VSYNC, dirty = true
		 * 3 - dirty = false, shadow_dirty = true
		 * 4 - shadow_dirty = false */
		if (i++ == 3) {
			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
					ovl->id);
			r = 0;
			break;
		}

		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
		if (r == -ERESTARTSYS)
			break;

		if (r) {
			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
			break;
		}
	}

	return r;
}
/*
 * Write one overlay's cached configuration to the DISPC shadow registers.
 * If the overlay is disabled in the cache, or dispc_ovl_setup() rejects the
 * configuration, the overlay is disabled in HW instead.  Returns 0 on
 * success or the error from dispc_ovl_setup().
 */
static int dss_ovl_write_regs(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;
	struct omap_overlay_info *oi;
	bool ilace, replication;
	int r;

	DSSDBGF("%d", ovl->id);

	op = get_ovl_priv(ovl);
	oi = &op->info;

	if (!op->enabled) {
		dispc_ovl_enable(ovl->id, 0);
		return 0;
	}

	replication = dss_use_replication(ovl->manager->device, oi->color_mode);

	/* Only the VENC output is driven interlaced here. */
	ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

	dispc_ovl_set_channel_out(ovl->id, op->channel);

	r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
	if (r) {
		/* this shouldn't happen */
		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
		dispc_ovl_enable(ovl->id, 0);
		return r;
	}

	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);

	dispc_ovl_enable(ovl->id, 1);

	return 0;
}
  232. static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
  233. {
  234. struct mgr_priv_data *mp;
  235. struct omap_overlay_manager_info *mi;
  236. DSSDBGF("%d", mgr->id);
  237. mp = get_mgr_priv(mgr);
  238. mi = &mp->info;
  239. dispc_mgr_setup(mgr->id, mi);
  240. }
/* dss_write_regs() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers.
 *
 * Operates on dss_data; callers hold data_lock. */
static int dss_write_regs(void)
{
	struct omap_overlay *ovl;
	struct omap_overlay_manager *mgr;
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;
	const int num_ovls = dss_feat_get_num_ovls();
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;
	int r;
	bool mgr_busy[MAX_DSS_MANAGERS];
	bool mgr_go[MAX_DSS_MANAGERS];
	bool busy;

	r = 0;
	busy = false;

	/* Snapshot which managers still have a pending GO bit; their shadow
	 * registers must not be touched until HW has taken them into use. */
	for (i = 0; i < num_mgrs; i++) {
		mgr_busy[i] = dispc_mgr_go_busy(i);
		mgr_go[i] = false;
	}

	/* Commit overlay settings */
	for (i = 0; i < num_ovls; ++i) {
		ovl = omap_dss_get_overlay(i);
		op = get_ovl_priv(ovl);

		if (!op->dirty)
			continue;

		mp = get_mgr_priv(ovl->manager);

		/* Manual-update managers are written only from
		 * dss_mgr_start_update(). */
		if (mp->manual_update && !mp->do_manual_update)
			continue;

		if (mgr_busy[op->channel]) {
			busy = true;
			continue;
		}

		r = dss_ovl_write_regs(ovl);
		if (r)
			DSSERR("dss_ovl_write_regs %d failed\n", i);

		op->dirty = false;
		op->shadow_dirty = true;
		mgr_go[op->channel] = true;
	}

	/* Commit manager settings */
	for (i = 0; i < num_mgrs; ++i) {
		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->dirty)
			continue;

		if (mp->manual_update && !mp->do_manual_update)
			continue;

		if (mgr_busy[i]) {
			busy = true;
			continue;
		}

		dss_mgr_write_regs(mgr);
		mp->dirty = false;
		mp->shadow_dirty = true;
		mgr_go[i] = true;
	}

	/* set GO */
	for (i = 0; i < num_mgrs; ++i) {
		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mgr_go[i])
			continue;

		/* We don't need GO with manual update display. LCD iface will
		 * always be turned off after frame, and new settings will be
		 * taken in to use at next update */
		if (!mp->manual_update)
			dispc_mgr_go(i);
	}

	if (busy)
		r = 1;
	else
		r = 0;

	return r;
}
  319. void dss_mgr_start_update(struct omap_overlay_manager *mgr)
  320. {
  321. struct mgr_priv_data *mp = get_mgr_priv(mgr);
  322. struct ovl_priv_data *op;
  323. struct omap_overlay *ovl;
  324. mp->do_manual_update = true;
  325. dss_write_regs();
  326. mp->do_manual_update = false;
  327. list_for_each_entry(ovl, &mgr->overlays, list) {
  328. op = get_ovl_priv(ovl);
  329. op->shadow_dirty = false;
  330. }
  331. mp->shadow_dirty = false;
  332. dispc_mgr_enable(mgr->id, true);
  333. }
  334. static void dss_apply_irq_handler(void *data, u32 mask);
  335. static void dss_register_vsync_isr(void)
  336. {
  337. const int num_mgrs = dss_feat_get_num_mgrs();
  338. u32 mask;
  339. int r, i;
  340. mask = 0;
  341. for (i = 0; i < num_mgrs; ++i)
  342. mask |= dispc_mgr_get_vsync_irq(i);
  343. r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
  344. WARN_ON(r);
  345. dss_data.irq_enabled = true;
  346. }
  347. static void dss_unregister_vsync_isr(void)
  348. {
  349. const int num_mgrs = dss_feat_get_num_mgrs();
  350. u32 mask;
  351. int r, i;
  352. mask = 0;
  353. for (i = 0; i < num_mgrs; ++i)
  354. mask |= dispc_mgr_get_vsync_irq(i);
  355. r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
  356. WARN_ON(r);
  357. dss_data.irq_enabled = false;
  358. }
  359. static void dss_apply_irq_handler(void *data, u32 mask)
  360. {
  361. struct omap_overlay *ovl;
  362. struct omap_overlay_manager *mgr;
  363. struct mgr_priv_data *mp;
  364. struct ovl_priv_data *op;
  365. const int num_ovls = dss_feat_get_num_ovls();
  366. const int num_mgrs = dss_feat_get_num_mgrs();
  367. int i, r;
  368. bool mgr_busy[MAX_DSS_MANAGERS];
  369. for (i = 0; i < num_mgrs; i++)
  370. mgr_busy[i] = dispc_mgr_go_busy(i);
  371. spin_lock(&data_lock);
  372. for (i = 0; i < num_ovls; ++i) {
  373. ovl = omap_dss_get_overlay(i);
  374. op = get_ovl_priv(ovl);
  375. if (!mgr_busy[op->channel])
  376. op->shadow_dirty = false;
  377. }
  378. for (i = 0; i < num_mgrs; ++i) {
  379. mgr = omap_dss_get_overlay_manager(i);
  380. mp = get_mgr_priv(mgr);
  381. if (!mgr_busy[i])
  382. mp->shadow_dirty = false;
  383. }
  384. r = dss_write_regs();
  385. if (r == 1)
  386. goto end;
  387. /* re-read busy flags */
  388. for (i = 0; i < num_mgrs; i++)
  389. mgr_busy[i] = dispc_mgr_go_busy(i);
  390. /* keep running as long as there are busy managers, so that
  391. * we can collect overlay-applied information */
  392. for (i = 0; i < num_mgrs; ++i) {
  393. if (mgr_busy[i])
  394. goto end;
  395. }
  396. dss_unregister_vsync_isr();
  397. end:
  398. spin_unlock(&data_lock);
  399. }
  400. static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
  401. {
  402. struct ovl_priv_data *op;
  403. struct omap_dss_device *dssdev;
  404. op = get_ovl_priv(ovl);
  405. if (ovl->manager_changed) {
  406. ovl->manager_changed = false;
  407. ovl->info_dirty = true;
  408. }
  409. if (!overlay_enabled(ovl)) {
  410. if (op->enabled) {
  411. op->enabled = false;
  412. op->dirty = true;
  413. }
  414. return 0;
  415. }
  416. if (!ovl->info_dirty)
  417. return 0;
  418. dssdev = ovl->manager->device;
  419. if (dss_check_overlay(ovl, dssdev)) {
  420. if (op->enabled) {
  421. op->enabled = false;
  422. op->dirty = true;
  423. }
  424. return -EINVAL;
  425. }
  426. ovl->info_dirty = false;
  427. op->dirty = true;
  428. op->info = ovl->info;
  429. op->channel = ovl->manager->id;
  430. op->enabled = true;
  431. return 0;
  432. }
  433. static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
  434. {
  435. struct mgr_priv_data *mp;
  436. mp = get_mgr_priv(mgr);
  437. if (mgr->device_changed) {
  438. mgr->device_changed = false;
  439. mgr->info_dirty = true;
  440. }
  441. if (!mgr->info_dirty)
  442. return;
  443. if (!mgr->device)
  444. return;
  445. mgr->info_dirty = false;
  446. mp->dirty = true;
  447. mp->info = mgr->info;
  448. mp->manual_update = mgr_manual_update(mgr);
  449. }
/*
 * Compute and cache the FIFO low/high thresholds for an enabled overlay
 * from its FIFO size, burst size and the attached display's type.
 */
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
	struct ovl_priv_data *op;
	struct omap_dss_device *dssdev;
	u32 size, burst_size;

	op = get_ovl_priv(ovl);

	/* Thresholds matter only for overlays that will be enabled in HW. */
	if (!op->enabled)
		return;

	dssdev = ovl->manager->device;

	size = dispc_ovl_get_fifo_size(ovl->id);

	burst_size = dispc_ovl_get_burst_size(ovl->id);

	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DBI:
	case OMAP_DISPLAY_TYPE_SDI:
	case OMAP_DISPLAY_TYPE_VENC:
	case OMAP_DISPLAY_TYPE_HDMI:
		default_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#ifdef CONFIG_OMAP2_DSS_DSI
	case OMAP_DISPLAY_TYPE_DSI:
		/* DSI uses its own threshold calculation. */
		dsi_get_overlay_fifo_thresholds(ovl->id, size,
				burst_size, &op->fifo_low,
				&op->fifo_high);
		break;
#endif
	default:
		/* Unknown display type: driver bug. */
		BUG();
	}
}
/*
 * Apply the user-visible overlay/manager info of @mgr into the SW cache
 * and, for enabled auto-update displays, start flushing it towards the HW
 * registers.  Returns 0 on success or the error from dispc_runtime_get().
 */
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
	int r;
	unsigned long flags;
	struct omap_overlay *ovl;

	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

	/* DISPC must be powered while its registers are written. */
	r = dispc_runtime_get();
	if (r)
		return r;

	spin_lock_irqsave(&data_lock, flags);

	/* Configure overlays */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl(ovl);

	/* Configure manager */
	omap_dss_mgr_apply_mgr(mgr);

	/* Configure overlay fifos */
	list_for_each_entry(ovl, &mgr->overlays, list)
		omap_dss_mgr_apply_ovl_fifos(ovl);

	r = 0;

	if (mgr->enabled && !mgr_manual_update(mgr)) {
		/* The vsync ISR keeps retrying dss_write_regs() until every
		 * cached setting has reached the registers. */
		if (!dss_data.irq_enabled)
			dss_register_vsync_isr();

		dss_write_regs();
	}

	spin_unlock_irqrestore(&data_lock, flags);

	dispc_runtime_put();

	return r;
}
/* Enable the manager's output in HW and mark it enabled for apply(). */
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, true);
	mgr->enabled = true;
}
/* Disable the manager's output in HW and mark it disabled for apply(). */
void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
	dispc_mgr_enable(mgr->id, false);
	mgr->enabled = false;
}