/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.data_type = DATA_TYPE_Y444,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.data_type = DATA_TYPE_Y422,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.data_type = DATA_TYPE_Y420,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.data_type = DATA_TYPE_C444,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.data_type = DATA_TYPE_C422,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.data_type = DATA_TYPE_C420,
		.depth = 4,
	},
	[VPDMA_DATA_FMT_YC422] = {
		.data_type = DATA_TYPE_YC422,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.data_type = DATA_TYPE_YC444,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_CY422] = {
		.data_type = DATA_TYPE_CY422,
		.depth = 16,
	},
};
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.data_type = DATA_TYPE_RGB16_565,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.data_type = DATA_TYPE_ARGB_1555,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.data_type = DATA_TYPE_ARGB_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.data_type = DATA_TYPE_RGBA_5551,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.data_type = DATA_TYPE_RGBA_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.data_type = DATA_TYPE_ARGB24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.data_type = DATA_TYPE_RGB24_888,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.data_type = DATA_TYPE_ARGB32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.data_type = DATA_TYPE_RGBA24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.data_type = DATA_TYPE_RGBA32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.data_type = DATA_TYPE_BGR16_565,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.data_type = DATA_TYPE_ABGR_1555,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.data_type = DATA_TYPE_ABGR_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.data_type = DATA_TYPE_BGRA_5551,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.data_type = DATA_TYPE_BGRA_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.data_type = DATA_TYPE_ABGR24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.data_type = DATA_TYPE_BGR24_888,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.data_type = DATA_TYPE_ABGR32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.data_type = DATA_TYPE_BGRA24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.data_type = DATA_TYPE_BGRA32_8888,
		.depth = 32,
	},
};
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.data_type = DATA_TYPE_MV,
		.depth = 4,
	},
};
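
/*
 * Usage sketch (not code from this file): a client driver would typically
 * use the depth field of a format entry to compute the line stride of a
 * buffer, in bits-per-pixel terms. Assuming VPDMA_STRIDE_ALIGN is the
 * stride alignment constant defined in vpdma.h:
 *
 *	const struct vpdma_data_format *fmt =
 *			&vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422];
 *	unsigned int stride = ALIGN((width * fmt->depth) >> 3,
 *			VPDMA_STRIDE_ALIGN);
 */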
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num = VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num = VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num = VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num = VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num = VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num = VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num = VPE_CHAN_NUM_MV_IN,
		.cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num = VPE_CHAN_NUM_MV_OUT,
		.cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num = VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num = VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num = VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
	},
};
static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}
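
/*
 * Illustrative example (not from this driver): updating a 2-bit field at
 * bit 10 of a hypothetical register REG with write_field_reg():
 *
 *	write_field_reg(vpdma, REG, 0x2, 0x3, 10);
 *
 * performs a read-modify-write: it reads REG, clears bits [11:10], ORs in
 * (0x2 & 0x3) << 10, and writes the result back.
 */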
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dump only the group0 and group3 registers, because the VPE
	 * channels lie within these two groups
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these registers are specific to VPE clients; this function could
	 * be extended to dump either VPE or VIP client registers, depending
	 * on who is using the library
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	/* descriptor buffers must meet VPDMA's alignment requirement */
	WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN);

	return 0;
}

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
/*
 * map a descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}

/*
 * unmap a descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);

	buf->mapped = false;
}
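
/*
 * The typical buffer lifecycle, as a sketch of how the helpers above
 * compose (illustrative, not code from this driver):
 *
 *	struct vpdma_buf buf;
 *
 *	if (vpdma_alloc_desc_buf(&buf, SZ_4K))
 *		return -ENOMEM;
 *	... fill buf.addr with descriptors or payload ...
 *	if (vpdma_map_desc_buf(vpdma, &buf))
 *		goto free_buf;
 *	... the hardware may now DMA from buf.dma_addr ...
 *	vpdma_unmap_desc_buf(vpdma, &buf);
 *	vpdma_free_desc_buf(&buf);
 */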
/*
 * create a descriptor list: the user of this list will append
 * configuration, control and data descriptors to it, then submit the list
 * to VPDMA. VPDMA's list parser will go through each descriptor and
 * perform the required DMA operations
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;
	list->type = type;

	return 0;
}
/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying
 * it, to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}

/*
 * free the buffer allocated for the VPDMA descriptor list; this should be
 * called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
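
/*
 * A typical descriptor list flow (hedged sketch; assumes the descriptor
 * append helpers defined elsewhere in this library, and a
 * VPDMA_LIST_TYPE_NORMAL list type from vpdma.h):
 *
 *	struct vpdma_desc_list list;
 *
 *	vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL);
 *	... append config/control/data descriptors at list.next ...
 *	vpdma_map_desc_buf(vpdma, &list.buf);
 *	vpdma_submit_descs(vpdma, &list);
 *	... wait for the list complete interrupt ...
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);
 */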
static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}

/*
 * submit a list of DMA descriptors to the VPE VPDMA; does not wait for
 * completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
{
	/* we always use the first list */
	int list_num = 0;
	int list_size;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);

	return 0;
}
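
/*
 * For example (illustrative, assuming 32-byte data transfer descriptors):
 * a list holding two such descriptors has list->next - list->buf.addr
 * equal to 64, so list_size is programmed as 64 >> 4 = 4 units of
 * 16 bytes.
 */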
/* set or clear the mask for list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
		bool enable)
{
	u32 val;

	val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));

	write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
}

/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma)
{
	write_reg(vpdma, VPDMA_INT_LIST0_STAT,
			read_reg(vpdma, VPDMA_INT_LIST0_STAT));
}
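
/*
 * Sketch of an interrupt handler using the two helpers above
 * (hypothetical driver code; vpe_dev is an assumed client driver struct,
 * and list 0 is used as in vpdma_submit_descs()):
 *
 *	static irqreturn_t vpe_irq(int irq, void *data)
 *	{
 *		struct vpe_dev *dev = data;
 *
 *		vpdma_clear_list_stat(dev->vpdma);
 *		... release finished buffers, submit the next list ...
 *		return IRQ_HANDLED;
 *	}
 *
 * with vpdma_enable_list_complete_irq(vpdma, 0, true) called once at
 * stream-on to unmask the list 0 interrupt.
 */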
/*
 * configures the output mode of the line buffer for the given client: the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}

/*
 * configures the event which should trigger VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
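
/*
 * Driver-side sketch (illustrative): before streaming, a client would
 * program each channel it uses, e.g. for the chroma input of a 420 source:
 *
 *	vpdma_set_line_mode(vpdma, 1, VPE_CHAN_CHROMA1_IN);
 *	vpdma_set_frame_start_event(vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
 *			VPE_CHAN_CHROMA1_IN);
 *
 * VPDMA_FSEVENT_CHANNEL_ACTIVE is assumed to be one of the values of
 * enum vpdma_frame_start_event in vpdma.h.
 */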
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->ready = true;
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}
	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->ready = true;

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}
struct vpdma_data *vpdma_create(struct platform_device *pdev)
{
	struct resource *res;
	struct vpdma_data *vpdma;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
	if (!vpdma) {
		dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
		return ERR_PTR(-ENOMEM);
	}

	vpdma->pdev = pdev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return ERR_PTR(-ENODEV);
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return ERR_PTR(-ENOMEM);
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return ERR_PTR(r);
	}

	return vpdma;
}

MODULE_FIRMWARE(VPDMA_FIRMWARE);
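
/*
 * Usage from a client driver's probe (sketch, not code from this file):
 *
 *	vpdma = vpdma_create(pdev);
 *	if (IS_ERR(vpdma))
 *		return PTR_ERR(vpdma);
 *
 * Since the firmware is requested asynchronously, the client must check
 * vpdma->ready before submitting descriptor lists.
 */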