  1. /* drivers/video/msm_fb/mdp.c
  2. *
  3. * MSM MDP Interface (used by framebuffer core)
  4. *
  5. * Copyright (C) 2007 QUALCOMM Incorporated
  6. * Copyright (C) 2007 Google Incorporated
  7. *
  8. * This software is licensed under the terms of the GNU General Public
  9. * License version 2, as published by the Free Software Foundation, and
  10. * may be copied, distributed, and modified under those terms.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/fb.h>
  19. #include <linux/msm_mdp.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/wait.h>
  22. #include <linux/clk.h>
  23. #include <linux/file.h>
  24. #ifdef CONFIG_ANDROID_PMEM
  25. #include <linux/android_pmem.h>
  26. #endif
  27. #include <linux/major.h>
  28. #include <mach/msm_iomap.h>
  29. #include <mach/msm_fb.h>
  30. #include <linux/platform_device.h>
  31. #include "mdp_hw.h"
/* Sysfs class under which MDP client interfaces register
 * (see register_mdp_client()); created in mdp_init(). */
struct class *mdp_class;

/* Base offset of the MDP command/debug register window. */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* Default colour-conversion coefficients written in mdp_probe():
 * nine matrix entries followed by three offsets.  Presumably a YCbCr
 * to RGB conversion in fixed point — TODO confirm against the MDP
 * CCS register documentation. */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* Waiters for DMA2 (frame push) and PPP (blit) completion; woken
 * from mdp_isr(). */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);

/* One-shot callback fired from the ISR on DMA2 completion. */
static struct msmfb_callback *dma_callback;

/* MDP core clock; gated off whenever no interrupt source is enabled. */
static struct clk *clk;

/* Set of currently-enabled MDP interrupt sources; guarded by mdp_lock. */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);

/* Serializes blit operations in mdp_blit(). */
DEFINE_MUTEX(mdp_mutex);
  45. static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
  46. {
  47. unsigned long irq_flags;
  48. int ret = 0;
  49. BUG_ON(!mask);
  50. spin_lock_irqsave(&mdp_lock, irq_flags);
  51. /* if the mask bits are already set return an error, this interrupt
  52. * is already enabled */
  53. if (mdp_irq_mask & mask) {
  54. printk(KERN_ERR "mdp irq already on already on %x %x\n",
  55. mdp_irq_mask, mask);
  56. ret = -1;
  57. }
  58. /* if the mdp irq is not already enabled enable it */
  59. if (!mdp_irq_mask) {
  60. if (clk)
  61. clk_enable(clk);
  62. enable_irq(mdp->irq);
  63. }
  64. /* update the irq mask to reflect the fact that the interrupt is
  65. * enabled */
  66. mdp_irq_mask |= mask;
  67. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  68. return ret;
  69. }
  70. static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
  71. {
  72. /* this interrupt is already disabled! */
  73. if (!(mdp_irq_mask & mask)) {
  74. printk(KERN_ERR "mdp irq already off %x %x\n",
  75. mdp_irq_mask, mask);
  76. return -1;
  77. }
  78. /* update the irq mask to reflect the fact that the interrupt is
  79. * disabled */
  80. mdp_irq_mask &= ~(mask);
  81. /* if no one is waiting on the interrupt, disable it */
  82. if (!mdp_irq_mask) {
  83. disable_irq(mdp->irq);
  84. if (clk)
  85. clk_disable(clk);
  86. }
  87. return 0;
  88. }
  89. static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
  90. {
  91. unsigned long irq_flags;
  92. int ret;
  93. spin_lock_irqsave(&mdp_lock, irq_flags);
  94. ret = locked_disable_mdp_irq(mdp, mask);
  95. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  96. return ret;
  97. }
/* MDP interrupt handler.
 *
 * Acknowledges every pending source, then services only those sources
 * we actually enabled: DMA2 completion fires the one-shot frame-done
 * callback and wakes DMA waiters; ROI-done wakes blit waiters.
 * Serviced sources are treated as one-shot and disabled again (which
 * also gates the clock when the last one goes).
 */
static irqreturn_t mdp_isr(int irq, void *data)
{
	uint32_t status;
	unsigned long irq_flags;
	struct mdp_info *mdp = data;

	spin_lock_irqsave(&mdp_lock, irq_flags);
	/* Read and ack everything pending, but only act on enabled bits. */
	status = mdp_readl(mdp, MDP_INTR_STATUS);
	mdp_writel(mdp, status, MDP_INTR_CLEAR);
	status &= mdp_irq_mask;
	if (status & DL0_DMA2_TERM_DONE) {
		/* Fire the callback before waking synchronous waiters. */
		if (dma_callback) {
			dma_callback->func(dma_callback);
			dma_callback = NULL;
		}
		wake_up(&mdp_dma2_waitqueue);
	}
	if (status & DL0_ROI_DONE)
		wake_up(&mdp_ppp_waitqueue);
	/* One-shot semantics: turn the serviced sources back off. */
	if (status)
		locked_disable_mdp_irq(mdp, status);
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
	return IRQ_HANDLED;
}
  121. static uint32_t mdp_check_mask(uint32_t mask)
  122. {
  123. uint32_t ret;
  124. unsigned long irq_flags;
  125. spin_lock_irqsave(&mdp_lock, irq_flags);
  126. ret = mdp_irq_mask & mask;
  127. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  128. return ret;
  129. }
  130. static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
  131. {
  132. int ret = 0;
  133. unsigned long irq_flags;
  134. wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
  135. spin_lock_irqsave(&mdp_lock, irq_flags);
  136. if (mdp_irq_mask & mask) {
  137. locked_disable_mdp_irq(mdp, mask);
  138. printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
  139. mask);
  140. ret = -ETIMEDOUT;
  141. }
  142. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  143. return ret;
  144. }
  145. void mdp_dma_wait(struct mdp_device *mdp_dev)
  146. {
  147. #define MDP_MAX_TIMEOUTS 20
  148. static int timeout_count;
  149. struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
  150. if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
  151. timeout_count++;
  152. else
  153. timeout_count = 0;
  154. if (timeout_count > MDP_MAX_TIMEOUTS) {
  155. printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
  156. MDP_MAX_TIMEOUTS);
  157. BUG();
  158. }
  159. }
/* Block until the current PPP (blit) operation raises DL0_ROI_DONE,
 * or time out after one second.  Returns 0 or -ETIMEDOUT. */
static int mdp_ppp_wait(struct mdp_info *mdp)
{
	return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
}
/* Program and start a DMA2 transfer pushing an RGB565 region of the
 * frame at @addr (line pitch @stride) out over the primary MDDI panel,
 * placing it at (@x, @y).  @callback, if non-NULL, fires from the ISR
 * when DL0_DMA2_TERM_DONE signals completion.  If the DMA-done
 * interrupt is already enabled (a transfer is in flight) the request
 * is dropped.
 *
 * NOTE(review): the MDP_CMD_DEBUG_ACCESS_BASE + 0x01xx offsets below
 * are raw DMA2 register offsets — verify against the MDP register
 * spec; write order (config/params before the 0x0044 kick) matters.
 */
void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
		     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
		     struct msmfb_callback *callback)
{
	uint32_t dma2_cfg;
	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */

	if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
		printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
		return;
	}

	dma_callback = callback;

	/* Source: tightly packed RGB565 in AHB memory, routed to MDDI. */
	dma2_cfg = DMA_PACK_TIGHT |
		DMA_PACK_ALIGN_LSB |
		DMA_PACK_PATTERN_RGB |
		DMA_OUT_SEL_AHB |
		DMA_IBUF_NONCONTIGUOUS;
	dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
	dma2_cfg |= DMA_OUT_SEL_MDDI;
	dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
	dma2_cfg |= DMA_DITHER_EN;

	/* setup size, address, and stride */
	mdp_writel(mdp, (height << 16) | (width),
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
	mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
	mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);

	/* 666 18BPP destination with dithering (enabled above) */
	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;

	/* set y & x offset and MDDI transaction parameters */
	mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
	mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
	mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);

	/* start DMA2 */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
}
  200. void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
  201. uint32_t width, uint32_t height, uint32_t x, uint32_t y,
  202. struct msmfb_callback *callback, int interface)
  203. {
  204. struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
  205. if (interface == MSM_MDDI_PMDH_INTERFACE) {
  206. mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
  207. callback);
  208. }
  209. }
/* Resolve the memory backing the userspace fd in @img->memory_id.
 *
 * On success fills *start (physical base) and *len.  For a PMEM
 * region, *filep also receives a file reference that the caller must
 * later drop via put_img(); for the framebuffer device the reference
 * is dropped here and *filep is left untouched.
 * Returns 0 on success, -1 on failure (bad fd or unsupported device).
 */
int get_img(struct mdp_img *img, struct fb_info *info,
	    unsigned long *start, unsigned long *len,
	    struct file **filep)
{
	int put_needed, ret = 0;
	struct file *file;
	unsigned long vstart;

#ifdef CONFIG_ANDROID_PMEM
	/* get_pmem_file() returns 0 on success and pins the file;
	 * put_img() releases it. */
	if (!get_pmem_file(img->memory_id, start, &vstart, len, filep))
		return 0;
#endif
	/* Not PMEM: only the framebuffer device itself is acceptable. */
	file = fget_light(img->memory_id, &put_needed);
	if (file == NULL)
		return -1;
	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
	} else
		ret = -1;
	fput_light(file, put_needed);
	return ret;
}
/* Release the PMEM file references taken by get_img().
 * Framebuffer-backed images hold no reference (get_img() already
 * dropped it), so without CONFIG_ANDROID_PMEM this is a no-op. */
void put_img(struct file *src_file, struct file *dst_file)
{
#ifdef CONFIG_ANDROID_PMEM
	if (src_file)
		put_pmem_file(src_file);
	if (dst_file)
		put_pmem_file(dst_file);
#endif
}
/* Perform a PPP blit described by @req between two user-supplied
 * images (PMEM regions or the framebuffer itself).
 *
 * Serialized by mdp_mutex.  Returns 0 on success or a negative errno;
 * on every exit path the image references taken by get_img() are
 * released via put_img().
 */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
		       "memory\n");
		return -EINVAL;
	}
	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
		       "memory\n");
#ifdef CONFIG_ANDROID_PMEM
		/* dst lookup failed: drop only the src reference. */
		put_pmem_file(src_file);
#endif
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* NOTE(review): transp_mask is forced to NOP just above, so the
	 * first clause of this condition is always false; the tiled path
	 * triggers only for alpha blends.  Left as-is. */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		/* Hardware workaround: split a tall, narrow, rotated,
		 * blended blit into 16-pixel-high tiles, stepping the
		 * source horizontally (it is rotated 90 degrees). */
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* Final partial tile. */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* Blit never started: the IRQ we enabled must be taken back.
	 * (On wait failure mdp_wait() already disabled it.) */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
  323. void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
  324. {
  325. struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
  326. disp_id &= 0xf;
  327. mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
  328. }
  329. int register_mdp_client(struct class_interface *cint)
  330. {
  331. if (!mdp_class) {
  332. pr_err("mdp: no mdp_class when registering mdp client\n");
  333. return -ENODEV;
  334. }
  335. cint->class = mdp_class;
  336. return class_interface_register(cint);
  337. }
  338. #include "mdp_csc_table.h"
  339. #include "mdp_scale_tables.h"
  340. int mdp_probe(struct platform_device *pdev)
  341. {
  342. struct resource *resource;
  343. int ret;
  344. int n;
  345. struct mdp_info *mdp;
  346. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  347. if (!resource) {
  348. pr_err("mdp: can not get mdp mem resource!\n");
  349. return -ENOMEM;
  350. }
  351. mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
  352. if (!mdp)
  353. return -ENOMEM;
  354. mdp->irq = platform_get_irq(pdev, 0);
  355. if (mdp->irq < 0) {
  356. pr_err("mdp: can not get mdp irq\n");
  357. ret = mdp->irq;
  358. goto error_get_irq;
  359. }
  360. mdp->base = ioremap(resource->start,
  361. resource->end - resource->start);
  362. if (mdp->base == 0) {
  363. printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
  364. ret = -ENOMEM;
  365. goto error_ioremap;
  366. }
  367. mdp->mdp_dev.dma = mdp_dma;
  368. mdp->mdp_dev.dma_wait = mdp_dma_wait;
  369. mdp->mdp_dev.blit = mdp_blit;
  370. mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
  371. clk = clk_get(&pdev->dev, "mdp_clk");
  372. if (IS_ERR(clk)) {
  373. printk(KERN_INFO "mdp: failed to get mdp clk");
  374. return PTR_ERR(clk);
  375. }
  376. ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
  377. if (ret)
  378. goto error_request_irq;
  379. disable_irq(mdp->irq);
  380. mdp_irq_mask = 0;
  381. /* debug interface write access */
  382. mdp_writel(mdp, 1, 0x60);
  383. mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
  384. mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
  385. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
  386. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
  387. for (n = 0; n < ARRAY_SIZE(csc_table); n++)
  388. mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
  389. /* clear up unused fg/main registers */
  390. /* comp.plane 2&3 ystride */
  391. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
  392. /* unpacked pattern */
  393. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
  394. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
  395. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
  396. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
  397. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
  398. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
  399. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
  400. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
  401. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
  402. /* comp.plane 2 & 3 */
  403. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
  404. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
  405. /* clear unused bg registers */
  406. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
  407. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
  408. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
  409. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
  410. mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
  411. for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
  412. mdp_writel(mdp, mdp_upscale_table[n].val,
  413. mdp_upscale_table[n].reg);
  414. for (n = 0; n < 9; n++)
  415. mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
  416. mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
  417. mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
  418. mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
  419. /* register mdp device */
  420. mdp->mdp_dev.dev.parent = &pdev->dev;
  421. mdp->mdp_dev.dev.class = mdp_class;
  422. snprintf(mdp->mdp_dev.dev.bus_id, BUS_ID_SIZE, "mdp%d", pdev->id);
  423. /* if you can remove the platform device you'd have to implement
  424. * this:
  425. mdp_dev.release = mdp_class; */
  426. ret = device_register(&mdp->mdp_dev.dev);
  427. if (ret)
  428. goto error_device_register;
  429. return 0;
  430. error_device_register:
  431. free_irq(mdp->irq, mdp);
  432. error_request_irq:
  433. iounmap(mdp->base);
  434. error_get_irq:
  435. error_ioremap:
  436. kfree(mdp);
  437. return ret;
  438. }
/* Platform glue: binds mdp_probe() to the "msm_mdp" platform device.
 * No remove callback — the device is never expected to be unbound. */
static struct platform_driver msm_mdp_driver = {
	.probe = mdp_probe,
	.driver = {.name = "msm_mdp"},
};
  443. static int __init mdp_init(void)
  444. {
  445. mdp_class = class_create(THIS_MODULE, "msm_mdp");
  446. if (IS_ERR(mdp_class)) {
  447. printk(KERN_ERR "Error creating mdp class\n");
  448. return PTR_ERR(mdp_class);
  449. }
  450. return platform_driver_register(&msm_mdp_driver);
  451. }
  452. subsys_initcall(mdp_init);