  1. /*
  2. * drivers/media/platform/samsung/mfc5/s5p_mfc_opr.c
  3. *
  4. * Samsung MFC (Multi Function Codec - FIMV) driver
  5. * This file contains hw related functions.
  6. *
  7. * Kamil Debski, Copyright (c) 2011 Samsung Electronics
  8. * http://www.samsung.com/
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2 as
  12. * published by the Free Software Foundation.
  13. */
  14. #include "regs-mfc.h"
  15. #include "s5p_mfc_cmd_v5.h"
  16. #include "s5p_mfc_common.h"
  17. #include "s5p_mfc_ctrl.h"
  18. #include "s5p_mfc_debug.h"
  19. #include "s5p_mfc_intr.h"
  20. #include "s5p_mfc_opr_v5.h"
  21. #include "s5p_mfc_pm.h"
  22. #include <asm/cacheflush.h>
  23. #include <linux/delay.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/err.h>
  26. #include <linux/firmware.h>
  27. #include <linux/io.h>
  28. #include <linux/jiffies.h>
  29. #include <linux/mm.h>
  30. #include <linux/sched.h>
  31. #define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
  32. #define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
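/*
 * Editor's note (illustrative, not part of the original driver): OFFSETA()
 * and OFFSETB() express a bus address as an offset from the start of the
 * corresponding memory bank, scaled down by MFC_OFFSET_SHIFT so it fits the
 * hardware's address registers. Assuming, purely for illustration, that
 * MFC_OFFSET_SHIFT is 11 and dev->bank1 is 0x40000000, a buffer at bus
 * address 0x40080000 would be programmed as:
 *
 *	OFFSETA(0x40080000) = (0x40080000 - 0x40000000) >> 11 = 0x100
 *
 * This is why every buffer handed to the device below is first checked
 * against the bank alignment order.
 */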
  33. /* Allocate temporary buffers for decoding */
  34. int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
  35. {
  36. void *desc_virt;
  37. struct s5p_mfc_dev *dev = ctx->dev;
  38. ctx->desc_buf = vb2_dma_contig_memops.alloc(
  39. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
  40. if (IS_ERR(ctx->desc_buf)) {
  41. ctx->desc_buf = NULL;
  42. mfc_err("Allocating DESC buffer failed\n");
  43. return -ENOMEM;
  44. }
  45. ctx->desc_phys = s5p_mfc_mem_cookie(
  46. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
  47. BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
  48. desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
  49. if (desc_virt == NULL) {
  50. vb2_dma_contig_memops.put(ctx->desc_buf);
  51. ctx->desc_phys = 0;
  52. ctx->desc_buf = NULL;
  53. mfc_err("Remapping DESC buffer failed\n");
  54. return -ENOMEM;
  55. }
  56. memset(desc_virt, 0, DESC_BUF_SIZE);
  57. wmb();
  58. return 0;
  59. }
  60. /* Release temporary buffers for decoding */
  61. void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
  62. {
  63. if (ctx->desc_phys) {
  64. vb2_dma_contig_memops.put(ctx->desc_buf);
  65. ctx->desc_phys = 0;
  66. ctx->desc_buf = NULL;
  67. }
  68. }
  69. /* Allocate codec buffers */
  70. int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
  71. {
  72. struct s5p_mfc_dev *dev = ctx->dev;
  73. unsigned int enc_ref_y_size = 0;
  74. unsigned int enc_ref_c_size = 0;
  75. unsigned int guard_width, guard_height;
  76. if (ctx->type == MFCINST_DECODER) {
  77. mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
  78. ctx->luma_size, ctx->chroma_size, ctx->mv_size);
  79. mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
  80. } else if (ctx->type == MFCINST_ENCODER) {
  81. enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
  82. * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
  83. enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
  84. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
  85. enc_ref_c_size = ALIGN(ctx->img_width,
  86. S5P_FIMV_NV12MT_HALIGN)
  87. * ALIGN(ctx->img_height >> 1,
  88. S5P_FIMV_NV12MT_VALIGN);
  89. enc_ref_c_size = ALIGN(enc_ref_c_size,
  90. S5P_FIMV_NV12MT_SALIGN);
  91. } else {
  92. guard_width = ALIGN(ctx->img_width + 16,
  93. S5P_FIMV_NV12MT_HALIGN);
  94. guard_height = ALIGN((ctx->img_height >> 1) + 4,
  95. S5P_FIMV_NV12MT_VALIGN);
  96. enc_ref_c_size = ALIGN(guard_width * guard_height,
  97. S5P_FIMV_NV12MT_SALIGN);
  98. }
  99. mfc_debug(2, "recon luma size: %d chroma size: %d\n",
  100. enc_ref_y_size, enc_ref_c_size);
  101. } else {
  102. return -EINVAL;
  103. }
  104. /* Codecs have different memory requirements */
  105. switch (ctx->codec_mode) {
  106. case S5P_FIMV_CODEC_H264_DEC:
  107. ctx->bank1_size =
  108. ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
  109. S5P_FIMV_DEC_VERT_NB_MV_SIZE,
  110. S5P_FIMV_DEC_BUF_ALIGN);
  111. ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
  112. break;
  113. case S5P_FIMV_CODEC_MPEG4_DEC:
  114. ctx->bank1_size =
  115. ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
  116. S5P_FIMV_DEC_UPNB_MV_SIZE +
  117. S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
  118. S5P_FIMV_DEC_STX_PARSER_SIZE +
  119. S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
  120. S5P_FIMV_DEC_BUF_ALIGN);
  121. ctx->bank2_size = 0;
  122. break;
  123. case S5P_FIMV_CODEC_VC1RCV_DEC:
  124. case S5P_FIMV_CODEC_VC1_DEC:
  125. ctx->bank1_size =
  126. ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
  127. S5P_FIMV_DEC_UPNB_MV_SIZE +
  128. S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
  129. S5P_FIMV_DEC_NB_DCAC_SIZE +
  130. 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
  131. S5P_FIMV_DEC_BUF_ALIGN);
  132. ctx->bank2_size = 0;
  133. break;
  134. case S5P_FIMV_CODEC_MPEG2_DEC:
  135. ctx->bank1_size = 0;
  136. ctx->bank2_size = 0;
  137. break;
  138. case S5P_FIMV_CODEC_H263_DEC:
  139. ctx->bank1_size =
  140. ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
  141. S5P_FIMV_DEC_UPNB_MV_SIZE +
  142. S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
  143. S5P_FIMV_DEC_NB_DCAC_SIZE,
  144. S5P_FIMV_DEC_BUF_ALIGN);
  145. ctx->bank2_size = 0;
  146. break;
  147. case S5P_FIMV_CODEC_H264_ENC:
  148. ctx->bank1_size = (enc_ref_y_size * 2) +
  149. S5P_FIMV_ENC_UPMV_SIZE +
  150. S5P_FIMV_ENC_COLFLG_SIZE +
  151. S5P_FIMV_ENC_INTRAMD_SIZE +
  152. S5P_FIMV_ENC_NBORINFO_SIZE;
  153. ctx->bank2_size = (enc_ref_y_size * 2) +
  154. (enc_ref_c_size * 4) +
  155. S5P_FIMV_ENC_INTRAPRED_SIZE;
  156. break;
  157. case S5P_FIMV_CODEC_MPEG4_ENC:
  158. ctx->bank1_size = (enc_ref_y_size * 2) +
  159. S5P_FIMV_ENC_UPMV_SIZE +
  160. S5P_FIMV_ENC_COLFLG_SIZE +
  161. S5P_FIMV_ENC_ACDCCOEF_SIZE;
  162. ctx->bank2_size = (enc_ref_y_size * 2) +
  163. (enc_ref_c_size * 4);
  164. break;
  165. case S5P_FIMV_CODEC_H263_ENC:
  166. ctx->bank1_size = (enc_ref_y_size * 2) +
  167. S5P_FIMV_ENC_UPMV_SIZE +
  168. S5P_FIMV_ENC_ACDCCOEF_SIZE;
  169. ctx->bank2_size = (enc_ref_y_size * 2) +
  170. (enc_ref_c_size * 4);
  171. break;
  172. default:
  173. break;
  174. }
  175. /* Allocate only if memory from bank 1 is necessary */
  176. if (ctx->bank1_size > 0) {
  177. ctx->bank1_buf = vb2_dma_contig_memops.alloc(
  178. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
  179. if (IS_ERR(ctx->bank1_buf)) {
  180. ctx->bank1_buf = NULL;
  181. mfc_err("Buf alloc for decoding failed "
  182. "(port A)\n");
  183. return -ENOMEM;
  184. }
  185. ctx->bank1_phys = s5p_mfc_mem_cookie(
  186. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
  187. BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
  188. }
  189. /* Allocate only if memory from bank 2 is necessary */
  190. if (ctx->bank2_size > 0) {
  191. ctx->bank2_buf = vb2_dma_contig_memops.alloc(
  192. dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
  193. if (IS_ERR(ctx->bank2_buf)) {
  194. ctx->bank2_buf = NULL;
  195. mfc_err("Buf alloc for decoding failed (port B)\n");
  196. return -ENOMEM;
  197. }
  198. ctx->bank2_phys = s5p_mfc_mem_cookie(
  199. dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
  200. BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
  201. }
  202. return 0;
  203. }
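/*
 * Editor's note, a worked example of the reference-frame sizing above
 * (illustrative only; the alignment constants are assumed here to be
 * HALIGN = 128, VALIGN = 32 and SALIGN = 8192 - see regs-mfc.h for the
 * authoritative values). For a 1280x720 encode:
 *
 *	enc_ref_y_size = ALIGN(1280, 128) * ALIGN(720, 32)
 *	               = 1280 * 736 = 942080
 *	enc_ref_y_size = ALIGN(942080, 8192) = 942080  (already a multiple)
 *
 * Bank 1 and bank 2 each then hold two such luma reference planes; the four
 * chroma references and the per-codec scratch areas from the switch above
 * are added on top.
 */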
  204. /* Release buffers allocated for codec */
  205. void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
  206. {
  207. if (ctx->bank1_buf) {
  208. vb2_dma_contig_memops.put(ctx->bank1_buf);
  209. ctx->bank1_buf = NULL;
  210. ctx->bank1_phys = 0;
  211. ctx->bank1_size = 0;
  212. }
  213. if (ctx->bank2_buf) {
  214. vb2_dma_contig_memops.put(ctx->bank2_buf);
  215. ctx->bank2_buf = NULL;
  216. ctx->bank2_phys = 0;
  217. ctx->bank2_size = 0;
  218. }
  219. }
  220. /* Allocate memory for instance data buffer */
  221. int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
  222. {
  223. void *context_virt;
  224. struct s5p_mfc_dev *dev = ctx->dev;
  225. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
  226. ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
  227. ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
  228. else
  229. ctx->ctx_size = MFC_CTX_BUF_SIZE;
  230. ctx->ctx_buf = vb2_dma_contig_memops.alloc(
  231. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
  232. if (IS_ERR(ctx->ctx_buf)) {
  233. mfc_err("Allocating context buffer failed\n");
  234. ctx->ctx_phys = 0;
  235. ctx->ctx_buf = NULL;
  236. return -ENOMEM;
  237. }
  238. ctx->ctx_phys = s5p_mfc_mem_cookie(
  239. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
  240. BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
  241. ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
  242. context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
  243. if (context_virt == NULL) {
  244. mfc_err("Remapping instance buffer failed\n");
  245. vb2_dma_contig_memops.put(ctx->ctx_buf);
  246. ctx->ctx_phys = 0;
  247. ctx->ctx_buf = NULL;
  248. return -ENOMEM;
  249. }
  250. /* Zero content of the allocated memory */
  251. memset(context_virt, 0, ctx->ctx_size);
  252. wmb();
  253. /* Initialize shared memory */
  254. ctx->shm_alloc = vb2_dma_contig_memops.alloc(
  255. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], SHARED_BUF_SIZE);
  256. if (IS_ERR(ctx->shm_alloc)) {
  257. mfc_err("failed to allocate shared memory\n");
  258. return PTR_ERR(ctx->shm_alloc);
  259. }
  260. /* shared memory offset only keeps the offset from base (port a) */
  261. ctx->shm_ofs = s5p_mfc_mem_cookie(
  262. dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->shm_alloc)
  263. - dev->bank1;
  264. BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
  265. ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
  266. if (!ctx->shm) {
  267. vb2_dma_contig_memops.put(ctx->shm_alloc);
  268. ctx->shm_ofs = 0;
  269. ctx->shm_alloc = NULL;
  270. mfc_err("failed to get virtual address of shared memory\n");
  271. return -ENOMEM;
  272. }
  273. memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
  274. wmb();
  275. return 0;
  276. }
  277. /* Release instance buffer */
  278. void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
  279. {
  280. if (ctx->ctx_buf) {
  281. vb2_dma_contig_memops.put(ctx->ctx_buf);
  282. ctx->ctx_phys = 0;
  283. ctx->ctx_buf = NULL;
  284. }
  285. if (ctx->shm_alloc) {
  286. vb2_dma_contig_memops.put(ctx->shm_alloc);
  287. ctx->shm_alloc = NULL;
  288. ctx->shm = NULL;
  289. }
  290. }
  291. void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
  292. unsigned int ofs)
  293. {
  294. writel(data, (ctx->shm + ofs));
  295. wmb();
  296. }
  297. unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
  298. unsigned int ofs)
  299. {
  300. rmb();
  301. return readl(ctx->shm + ofs);
  302. }
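/*
 * Editor's sketch (not part of the original driver): the two helpers above
 * form a small mailbox over the per-context shared-memory page; wmb()/rmb()
 * order the CPU accesses against the register writes that kick the hardware.
 * Callers typically do a read-modify-write of one field, e.g. the frame-skip
 * bits updated in s5p_mfc_set_enc_params() below:
 *
 *	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
 *	shm &= ~(0x3 << 1);			// clear frame skip mode
 *	shm |= (p->frame_skip_mode << 1);	// set the new mode
 *	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
 */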
  303. /* Set registers for decoding temporary buffers */
  304. void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
  305. {
  306. struct s5p_mfc_dev *dev = ctx->dev;
  307. mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
  308. mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
  309. }
  310. /* Set registers for shared buffer */
  311. static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
  312. {
  313. struct s5p_mfc_dev *dev = ctx->dev;
  314. mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
  315. }
  316. /* Set registers for decoding stream buffer */
  317. int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
  318. unsigned int start_num_byte, unsigned int buf_size)
  319. {
  320. struct s5p_mfc_dev *dev = ctx->dev;
  321. mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
  322. mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
  323. mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
  324. s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM);
  325. return 0;
  326. }
  327. /* Set decoding frame buffer */
  328. int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
  329. {
  330. unsigned int frame_size, i;
  331. unsigned int frame_size_ch, frame_size_mv;
  332. struct s5p_mfc_dev *dev = ctx->dev;
  333. unsigned int dpb;
  334. size_t buf_addr1, buf_addr2;
  335. int buf_size1, buf_size2;
  336. buf_addr1 = ctx->bank1_phys;
  337. buf_size1 = ctx->bank1_size;
  338. buf_addr2 = ctx->bank2_phys;
  339. buf_size2 = ctx->bank2_size;
  340. dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
  341. ~S5P_FIMV_DPB_COUNT_MASK;
  342. mfc_write(dev, ctx->total_dpb_count | dpb,
  343. S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
  344. s5p_mfc_set_shared_buffer(ctx);
  345. switch (ctx->codec_mode) {
  346. case S5P_FIMV_CODEC_H264_DEC:
  347. mfc_write(dev, OFFSETA(buf_addr1),
  348. S5P_FIMV_H264_VERT_NB_MV_ADR);
  349. buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
  350. buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
  351. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
  352. buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
  353. buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
  354. break;
  355. case S5P_FIMV_CODEC_MPEG4_DEC:
  356. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
  357. buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
  358. buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
  359. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
  360. buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
  361. buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
  362. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
  363. buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  364. buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  365. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
  366. buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
  367. buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
  368. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
  369. buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  370. buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  371. break;
  372. case S5P_FIMV_CODEC_H263_DEC:
  373. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
  374. buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  375. buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  376. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
  377. buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
  378. buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
  379. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
  380. buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  381. buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  382. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
  383. buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
  384. buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
  385. break;
  386. case S5P_FIMV_CODEC_VC1_DEC:
  387. case S5P_FIMV_CODEC_VC1RCV_DEC:
  388. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
  389. buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
  390. buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
  391. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
  392. buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  393. buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
  394. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
  395. buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
  396. buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
  397. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
  398. buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  399. buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
  400. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
  401. buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  402. buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  403. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
  404. buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  405. buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  406. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
  407. buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  408. buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
  409. break;
  410. case S5P_FIMV_CODEC_MPEG2_DEC:
  411. break;
  412. default:
  413. mfc_err("Unknown codec for decoding (%x)\n",
  414. ctx->codec_mode);
  415. return -EINVAL;
  416. break;
  417. }
  418. frame_size = ctx->luma_size;
  419. frame_size_ch = ctx->chroma_size;
  420. frame_size_mv = ctx->mv_size;
  421. mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
  422. frame_size_mv);
  423. for (i = 0; i < ctx->total_dpb_count; i++) {
  424. /* Bank2 */
  425. mfc_debug(2, "Luma %d: %x\n", i,
  426. ctx->dst_bufs[i].cookie.raw.luma);
  427. mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
  428. S5P_FIMV_DEC_LUMA_ADR + i * 4);
  429. mfc_debug(2, "\tChroma %d: %x\n", i,
  430. ctx->dst_bufs[i].cookie.raw.chroma);
  431. mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
  432. S5P_FIMV_DEC_CHROMA_ADR + i * 4);
  433. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
  434. mfc_debug(2, "\tBuf2: %zx, size: %d\n",
  435. buf_addr2, buf_size2);
  436. mfc_write(dev, OFFSETB(buf_addr2),
  437. S5P_FIMV_H264_MV_ADR + i * 4);
  438. buf_addr2 += frame_size_mv;
  439. buf_size2 -= frame_size_mv;
  440. }
  441. }
  442. mfc_debug(2, "Buf1: %zu, buf_size1: %d\n", buf_addr1, buf_size1);
  443. mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
  444. buf_size1, buf_size2, ctx->total_dpb_count);
  445. if (buf_size1 < 0 || buf_size2 < 0) {
  446. mfc_debug(2, "Not enough memory has been allocated\n");
  447. return -ENOMEM;
  448. }
  449. s5p_mfc_write_info_v5(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
  450. s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
  451. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
  452. s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
  453. mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
  454. << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
  455. S5P_FIMV_SI_CH0_INST_ID);
  456. return 0;
  457. }
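/*
 * Editor's note: for H.264 the loop above also carves bank 2 into one
 * motion-vector buffer per DPB entry. s5p_mfc_alloc_codec_buffers() sized
 * bank 2 as total_dpb_count * mv_size, so buf_size2 should end up exactly 0
 * here; if either running size goes negative, more scratch memory was
 * consumed than was allocated and the check above fails with -ENOMEM rather
 * than letting the hardware run past the end of a bank.
 */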
  458. /* Set registers for encoding stream buffer */
  459. int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
  460. unsigned long addr, unsigned int size)
  461. {
  462. struct s5p_mfc_dev *dev = ctx->dev;
  463. mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
  464. mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
  465. return 0;
  466. }
  467. void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
  468. unsigned long y_addr, unsigned long c_addr)
  469. {
  470. struct s5p_mfc_dev *dev = ctx->dev;
  471. mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
  472. mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
  473. }
  474. void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
  475. unsigned long *y_addr, unsigned long *c_addr)
  476. {
  477. struct s5p_mfc_dev *dev = ctx->dev;
  478. *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
  479. << MFC_OFFSET_SHIFT);
  480. *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
  481. << MFC_OFFSET_SHIFT);
  482. }
  483. /* Set encoding ref & codec buffer */
  484. int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
  485. {
  486. struct s5p_mfc_dev *dev = ctx->dev;
  487. size_t buf_addr1, buf_addr2;
  488. size_t buf_size1, buf_size2;
  489. unsigned int enc_ref_y_size, enc_ref_c_size;
  490. unsigned int guard_width, guard_height;
  491. int i;
  492. buf_addr1 = ctx->bank1_phys;
  493. buf_size1 = ctx->bank1_size;
  494. buf_addr2 = ctx->bank2_phys;
  495. buf_size2 = ctx->bank2_size;
  496. enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
  497. * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
  498. enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
  499. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
  500. enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
  501. * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
  502. enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
  503. } else {
  504. guard_width = ALIGN(ctx->img_width + 16,
  505. S5P_FIMV_NV12MT_HALIGN);
  506. guard_height = ALIGN((ctx->img_height >> 1) + 4,
  507. S5P_FIMV_NV12MT_VALIGN);
  508. enc_ref_c_size = ALIGN(guard_width * guard_height,
  509. S5P_FIMV_NV12MT_SALIGN);
  510. }
  511. mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n", buf_size1, buf_size2);
  512. switch (ctx->codec_mode) {
  513. case S5P_FIMV_CODEC_H264_ENC:
  514. for (i = 0; i < 2; i++) {
  515. mfc_write(dev, OFFSETA(buf_addr1),
  516. S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
  517. buf_addr1 += enc_ref_y_size;
  518. buf_size1 -= enc_ref_y_size;
  519. mfc_write(dev, OFFSETB(buf_addr2),
  520. S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
  521. buf_addr2 += enc_ref_y_size;
  522. buf_size2 -= enc_ref_y_size;
  523. }
  524. for (i = 0; i < 4; i++) {
  525. mfc_write(dev, OFFSETB(buf_addr2),
  526. S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
  527. buf_addr2 += enc_ref_c_size;
  528. buf_size2 -= enc_ref_c_size;
  529. }
  530. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
  531. buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
  532. buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
  533. mfc_write(dev, OFFSETA(buf_addr1),
  534. S5P_FIMV_H264_COZERO_FLAG_ADR);
  535. buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
  536. buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
  537. mfc_write(dev, OFFSETA(buf_addr1),
  538. S5P_FIMV_H264_UP_INTRA_MD_ADR);
  539. buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
  540. buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
  541. mfc_write(dev, OFFSETB(buf_addr2),
  542. S5P_FIMV_H264_UP_INTRA_PRED_ADR);
  543. buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
  544. buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
  545. mfc_write(dev, OFFSETA(buf_addr1),
  546. S5P_FIMV_H264_NBOR_INFO_ADR);
  547. buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
  548. buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
  549. mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
  550. buf_size1, buf_size2);
  551. break;
  552. case S5P_FIMV_CODEC_MPEG4_ENC:
  553. for (i = 0; i < 2; i++) {
  554. mfc_write(dev, OFFSETA(buf_addr1),
  555. S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
  556. buf_addr1 += enc_ref_y_size;
  557. buf_size1 -= enc_ref_y_size;
  558. mfc_write(dev, OFFSETB(buf_addr2),
  559. S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
  560. buf_addr2 += enc_ref_y_size;
  561. buf_size2 -= enc_ref_y_size;
  562. }
  563. for (i = 0; i < 4; i++) {
  564. mfc_write(dev, OFFSETB(buf_addr2),
  565. S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
  566. buf_addr2 += enc_ref_c_size;
  567. buf_size2 -= enc_ref_c_size;
  568. }
  569. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
  570. buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
  571. buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
  572. mfc_write(dev, OFFSETA(buf_addr1),
  573. S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
  574. buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
  575. buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
  576. mfc_write(dev, OFFSETA(buf_addr1),
  577. S5P_FIMV_MPEG4_ACDC_COEF_ADR);
  578. buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
  579. buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
  580. mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
  581. buf_size1, buf_size2);
  582. break;
  583. case S5P_FIMV_CODEC_H263_ENC:
  584. for (i = 0; i < 2; i++) {
  585. mfc_write(dev, OFFSETA(buf_addr1),
  586. S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
  587. buf_addr1 += enc_ref_y_size;
  588. buf_size1 -= enc_ref_y_size;
  589. mfc_write(dev, OFFSETB(buf_addr2),
  590. S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
  591. buf_addr2 += enc_ref_y_size;
  592. buf_size2 -= enc_ref_y_size;
  593. }
  594. for (i = 0; i < 4; i++) {
  595. mfc_write(dev, OFFSETB(buf_addr2),
  596. S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
  597. buf_addr2 += enc_ref_c_size;
  598. buf_size2 -= enc_ref_c_size;
  599. }
  600. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
  601. buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
  602. buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
  603. mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
  604. buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
  605. buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
  606. mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
  607. buf_size1, buf_size2);
  608. break;
  609. default:
  610. mfc_err("Unknown codec set for encoding: %d\n",
  611. ctx->codec_mode);
  612. return -EINVAL;
  613. }
  614. return 0;
  615. }
  616. static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
  617. {
  618. struct s5p_mfc_dev *dev = ctx->dev;
  619. struct s5p_mfc_enc_params *p = &ctx->enc_params;
  620. unsigned int reg;
  621. unsigned int shm;
  622. /* width */
  623. mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
  624. /* height */
  625. mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
  626. /* pictype : enable, IDR period */
  627. reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  628. reg |= (1 << 18);
  629. reg &= ~(0xFFFF);
  630. reg |= p->gop_size;
  631. mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  632. mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
  633. /* multi-slice control */
  634. /* multi-slice MB number or bit size */
  635. mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
  636. if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
  637. mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
  638. } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
  639. mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
  640. } else {
  641. mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
  642. mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
  643. }
  644. /* cyclic intra refresh */
  645. mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
  646. /* memory structure cur. frame */
  647. if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
  648. mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
  649. else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
  650. mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
  651. /* padding control & value */
  652. reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
  653. if (p->pad) {
  654. /** enable */
  655. reg |= (1 << 31);
  656. /** cr value */
  657. reg &= ~(0xFF << 16);
  658. reg |= (p->pad_cr << 16);
  659. /** cb value */
  660. reg &= ~(0xFF << 8);
  661. reg |= (p->pad_cb << 8);
  662. /** y value */
  663. reg &= ~(0xFF);
  664. reg |= (p->pad_luma);
  665. } else {
  666. /** disable & all value clear */
  667. reg = 0;
  668. }
  669. mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
  670. /* rate control config. */
  671. reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
  672. /** frame-level rate control */
  673. reg &= ~(0x1 << 9);
  674. reg |= (p->rc_frame << 9);
  675. mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
  676. /* bit rate */
  677. if (p->rc_frame)
  678. mfc_write(dev, p->rc_bitrate,
  679. S5P_FIMV_ENC_RC_BIT_RATE);
  680. else
  681. mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
  682. /* reaction coefficient */
  683. if (p->rc_frame)
  684. mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
  685. shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
  686. /* seq header ctrl */
  687. shm &= ~(0x1 << 3);
  688. shm |= (p->seq_hdr_mode << 3);
  689. /* frame skip mode */
  690. shm &= ~(0x3 << 1);
  691. shm |= (p->frame_skip_mode << 1);
  692. s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
  693. /* fixed target bit */
  694. s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
  695. return 0;
  696. }
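/*
 * Editor's sketch of the read-modify-write register pattern used throughout
 * the encoder setup above (values illustrative only). For the picture-type
 * control with a GOP size of 30:
 *
 *	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
 *	reg |= (1 << 18);	// enable, as in the code above
 *	reg &= ~0xFFFF;		// clear the previous GOP size
 *	reg |= 30;		// new GOP size
 *	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
 *
 * The codec-specific helpers below extend the same register, e.g. with the
 * number of B frames in bits 16..17.
 */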
  697. static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
  698. {
  699. struct s5p_mfc_dev *dev = ctx->dev;
  700. struct s5p_mfc_enc_params *p = &ctx->enc_params;
  701. struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
  702. unsigned int reg;
  703. unsigned int shm;
  704. s5p_mfc_set_enc_params(ctx);
  705. /* pictype : number of B */
  706. reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  707. /* num_b_frame - 0 ~ 2 */
  708. reg &= ~(0x3 << 16);
  709. reg |= (p->num_b_frame << 16);
  710. mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  711. /* profile & level */
  712. reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
  713. /* level */
  714. reg &= ~(0xFF << 8);
  715. reg |= (p_264->level << 8);
  716. /* profile - 0 ~ 2 */
  717. reg &= ~(0x3F);
  718. reg |= p_264->profile;
  719. mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
  720. /* interlace */
  721. mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
  722. /* height */
  723. if (p->interlace)
  724. mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
  725. /* loopfilter ctrl */
  726. mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
  727. /* loopfilter alpha offset */
  728. if (p_264->loop_filter_alpha < 0) {
  729. reg = 0x10;
  730. reg |= (0xFF - p_264->loop_filter_alpha) + 1;
  731. } else {
  732. reg = 0x00;
  733. reg |= (p_264->loop_filter_alpha & 0xF);
  734. }
  735. mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
  736. /* loopfilter beta offset */
  737. if (p_264->loop_filter_beta < 0) {
  738. reg = 0x10;
  739. reg |= (0xFF - p_264->loop_filter_beta) + 1;
  740. } else {
  741. reg = 0x00;
  742. reg |= (p_264->loop_filter_beta & 0xF);
  743. }
  744. mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
  745. /* entropy coding mode */
  746. if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
  747. mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
  748. else
  749. mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
  750. /* number of ref. picture */
  751. reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
  752. /* num of ref. pictures of P */
  753. reg &= ~(0x3 << 5);
  754. reg |= (p_264->num_ref_pic_4p << 5);
  755. /* max number of ref. pictures */
  756. reg &= ~(0x1F);
  757. reg |= p_264->max_ref_pic;
  758. mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
  759. /* 8x8 transform enable */
  760. mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
  761. /* rate control config. */
  762. reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
  763. /* macroblock level rate control */
  764. reg &= ~(0x1 << 8);
  765. reg |= (p_264->rc_mb << 8);
  766. /* frame QP */
  767. reg &= ~(0x3F);
  768. reg |= p_264->rc_frame_qp;
  769. mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
  770. /* frame rate */
  771. if (p->rc_frame && p->rc_framerate_denom)
  772. mfc_write(dev, p->rc_framerate_num * 1000
  773. / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
  774. else
  775. mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
  776. /* max & min value of QP */
  777. reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
  778. /* max QP */
  779. reg &= ~(0x3F << 8);
  780. reg |= (p_264->rc_max_qp << 8);
  781. /* min QP */
  782. reg &= ~(0x3F);
  783. reg |= p_264->rc_min_qp;
  784. mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
  785. /* macroblock adaptive scaling features */
  786. if (p_264->rc_mb) {
  787. reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
  788. /* dark region */
  789. reg &= ~(0x1 << 3);
  790. reg |= (p_264->rc_mb_dark << 3);
  791. /* smooth region */
  792. reg &= ~(0x1 << 2);
  793. reg |= (p_264->rc_mb_smooth << 2);
  794. /* static region */
  795. reg &= ~(0x1 << 1);
  796. reg |= (p_264->rc_mb_static << 1);
  797. /* high activity region */
  798. reg &= ~(0x1);
  799. reg |= p_264->rc_mb_activity;
  800. mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
  801. }
  802. if (!p->rc_frame &&
  803. !p_264->rc_mb) {
  804. shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
  805. shm &= ~(0xFFF);
  806. shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
  807. shm |= (p_264->rc_p_frame_qp & 0x3F);
  808. s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
  809. }
  810. /* extended encoder ctrl */
  811. shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
  812. /* AR VUI control */
  813. shm &= ~(0x1 << 15);
  814. shm |= (p_264->vui_sar << 1);
  815. s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
  816. if (p_264->vui_sar) {
  817. /* aspect ration IDC */
  818. shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC);
  819. shm &= ~(0xFF);
  820. shm |= p_264->vui_sar_idc;
  821. s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
  822. if (p_264->vui_sar_idc == 0xFF) {
  823. /* sample AR info */
  824. shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR);
  825. shm &= ~(0xFFFFFFFF);
  826. shm |= p_264->vui_ext_sar_width << 16;
  827. shm |= p_264->vui_ext_sar_height;
  828. s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR);
  829. }
  830. }
  831. /* intra picture period for H.264 */
  832. shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD);
  833. /* control */
  834. shm &= ~(0x1 << 16);
  835. shm |= (p_264->open_gop << 16);
  836. /* value */
  837. if (p_264->open_gop) {
  838. shm &= ~(0xFFFF);
  839. shm |= p_264->open_gop_size;
  840. }
  841. s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD);
  842. /* extended encoder ctrl */
  843. shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
  844. /* vbv buffer size */
  845. if (p->frame_skip_mode ==
  846. V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
  847. shm &= ~(0xFFFF << 16);
  848. shm |= (p_264->cpb_size << 16);
  849. }
  850. s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
  851. return 0;
  852. }
  853. static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
  854. {
  855. struct s5p_mfc_dev *dev = ctx->dev;
  856. struct s5p_mfc_enc_params *p = &ctx->enc_params;
  857. struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
  858. unsigned int reg;
  859. unsigned int shm;
  860. unsigned int framerate;
  861. s5p_mfc_set_enc_params(ctx);
  862. /* pictype : number of B */
  863. reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  864. /* num_b_frame - 0 ~ 2 */
  865. reg &= ~(0x3 << 16);
  866. reg |= (p->num_b_frame << 16);
  867. mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
  868. /* profile & level */
  869. reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
  870. /* level */
  871. reg &= ~(0xFF << 8);
  872. reg |= (p_mpeg4->level << 8);
  873. /* profile - 0 ~ 2 */
  874. reg &= ~(0x3F);
  875. reg |= p_mpeg4->profile;
  876. mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
  877. /* quarter_pixel */
  878. mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
  879. /* qp */
  880. if (!p->rc_frame) {
  881. shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
  882. shm &= ~(0xFFF);
  883. shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
  884. shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
  885. s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
  886. }
  887. /* frame rate */
  888. if (p->rc_frame) {
  889. if (p->rc_framerate_denom > 0) {
  890. framerate = p->rc_framerate_num * 1000 /
  891. p->rc_framerate_denom;
  892. mfc_write(dev, framerate,
  893. S5P_FIMV_ENC_RC_FRAME_RATE);
  894. shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
  895. shm &= ~(0xFFFFFFFF);
  896. shm |= (1 << 31);
  897. shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
  898. shm |= (p->rc_framerate_denom & 0xFFFF);
  899. s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
  900. }
  901. } else {
  902. mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
  903. }
  904. /* rate control config. */
  905. reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
  906. /* frame QP */
  907. reg &= ~(0x3F);
  908. reg |= p_mpeg4->rc_frame_qp;
  909. mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
  910. /* max & min value of QP */
  911. reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
  912. /* max QP */
  913. reg &= ~(0x3F << 8);
  914. reg |= (p_mpeg4->rc_max_qp << 8);
  915. /* min QP */
  916. reg &= ~(0x3F);
  917. reg |= p_mpeg4->rc_min_qp;
  918. mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
  919. /* extended encoder ctrl */
  920. shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
  921. /* vbv buffer size */
  922. if (p->frame_skip_mode ==
  923. V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
  924. shm &= ~(0xFFFF << 16);
  925. shm |= (p->vbv_size << 16);
  926. }
  927. s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
  928. return 0;
  929. }
  930. static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
  931. {
  932. struct s5p_mfc_dev *dev = ctx->dev;
  933. struct s5p_mfc_enc_params *p = &ctx->enc_params;
  934. struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
  935. unsigned int reg;
  936. unsigned int shm;
  937. s5p_mfc_set_enc_params(ctx);
  938. /* qp */
  939. if (!p->rc_frame) {
  940. shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
  941. shm &= ~(0xFFF);
  942. shm |= (p_h263->rc_p_frame_qp & 0x3F);
  943. s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
  944. }
  945. /* frame rate */
  946. if (p->rc_frame && p->rc_framerate_denom)
  947. mfc_write(dev, p->rc_framerate_num * 1000
  948. / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
  949. else
  950. mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
  951. /* rate control config. */
  952. reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
  953. /* frame QP */
  954. reg &= ~(0x3F);
  955. reg |= p_h263->rc_frame_qp;
  956. mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
  957. /* max & min value of QP */
  958. reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
  959. /* max QP */
  960. reg &= ~(0x3F << 8);
  961. reg |= (p_h263->rc_max_qp << 8);
  962. /* min QP */
  963. reg &= ~(0x3F);
  964. reg |= p_h263->rc_min_qp;
  965. mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
  966. /* extended encoder ctrl */
  967. shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
  968. /* vbv buffer size */
  969. if (p->frame_skip_mode ==
  970. V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
  971. shm &= ~(0xFFFF << 16);
  972. shm |= (p->vbv_size << 16);
  973. }
  974. s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
  975. return 0;
  976. }
  977. /* Initialize decoding */
  978. int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
  979. {
  980. struct s5p_mfc_dev *dev = ctx->dev;
  981. s5p_mfc_set_shared_buffer(ctx);
  982. /* Setup loop filter, for decoding this is only valid for MPEG4 */
  983. if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
  984. mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
  985. else
  986. mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
  987. mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
  988. S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
  989. S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
  990. S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
  991. S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
  992. mfc_write(dev,
  993. ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
  994. | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
  995. return 0;
  996. }
  997. static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
  998. {
  999. struct s5p_mfc_dev *dev = ctx->dev;
  1000. unsigned int dpb;
  1001. if (flush)
  1002. dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
  1003. S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
  1004. else
  1005. dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
  1006. ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
  1007. mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
  1008. }
  1009. /* Decode a single frame */
  1010. int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
  1011. enum s5p_mfc_decode_arg last_frame)
  1012. {
  1013. struct s5p_mfc_dev *dev = ctx->dev;
  1014. mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
  1015. s5p_mfc_set_shared_buffer(ctx);
  1016. s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
  1017. /* Issue different commands to the instance depending on whether
  1018. * this is the last frame or not. */
  1019. switch (last_frame) {
  1020. case MFC_DEC_FRAME:
  1021. mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
  1022. S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
  1023. break;
  1024. case MFC_DEC_LAST_FRAME:
  1025. mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
  1026. S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
  1027. break;
  1028. case MFC_DEC_RES_CHANGE:
  1029. mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
  1030. S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
  1031. S5P_FIMV_SI_CH0_INST_ID);
  1032. break;
  1033. }
  1034. mfc_debug(2, "Decoding a usual frame\n");
  1035. return 0;
  1036. }
  1037. int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
  1038. {
  1039. struct s5p_mfc_dev *dev = ctx->dev;
  1040. if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
  1041. s5p_mfc_set_enc_params_h264(ctx);
  1042. else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
  1043. s5p_mfc_set_enc_params_mpeg4(ctx);
  1044. else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
  1045. s5p_mfc_set_enc_params_h263(ctx);
  1046. else {
  1047. mfc_err("Unknown codec for encoding (%x)\n",
  1048. ctx->codec_mode);
  1049. return -EINVAL;
  1050. }
  1051. s5p_mfc_set_shared_buffer(ctx);
  1052. mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
  1053. (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
  1054. return 0;
  1055. }
  1056. /* Encode a single frame */
  1057. int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
  1058. {
  1059. struct s5p_mfc_dev *dev = ctx->dev;
  1060. int cmd;
  1061. /* memory structure cur. frame */
  1062. if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
  1063. mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
  1064. else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
  1065. mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
  1066. s5p_mfc_set_shared_buffer(ctx);
  1067. if (ctx->state == MFCINST_FINISHING)
  1068. cmd = S5P_FIMV_CH_LAST_FRAME;
  1069. else
  1070. cmd = S5P_FIMV_CH_FRAME_START;
  1071. mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
  1072. | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
  1073. return 0;
  1074. }
  1075. static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
  1076. {
  1077. unsigned long flags;
  1078. int new_ctx;
  1079. int cnt;
  1080. spin_lock_irqsave(&dev->condlock, flags);
  1081. new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
  1082. cnt = 0;
  1083. while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
  1084. new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
  1085. if (++cnt > MFC_NUM_CONTEXTS) {
  1086. /* No contexts to run */
  1087. spin_unlock_irqrestore(&dev->condlock, flags);
  1088. return -EAGAIN;
  1089. }
  1090. }
  1091. spin_unlock_irqrestore(&dev->condlock, flags);
  1092. return new_ctx;
  1093. }
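/*
 * Editor's note: this is a plain round-robin scan over the context work
 * bitmap, starting just after the context that ran last. For example
 * (values illustrative), with MFC_NUM_CONTEXTS = 4, curr_ctx = 1 and
 * ctx_work_bits = 0b1001, the walk probes context 2 (idle) and then 3
 * (ready) and returns 3; if it wraps all the way around without finding a
 * runnable context it returns -EAGAIN so s5p_mfc_try_run() can release the
 * hardware lock instead of stalling.
 */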
  1094. static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
  1095. {
  1096. struct s5p_mfc_dev *dev = ctx->dev;
  1097. s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
  1098. dev->curr_ctx = ctx->num;
  1099. s5p_mfc_clean_ctx_int_flags(ctx);
  1100. s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
  1101. }
  1102. static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
  1103. {
  1104. struct s5p_mfc_dev *dev = ctx->dev;
  1105. struct s5p_mfc_buf *temp_vb;
  1106. unsigned long flags;
  1107. unsigned int index;
  1108. spin_lock_irqsave(&dev->irqlock, flags);
  1109. /* Frames are being decoded */
  1110. if (list_empty(&ctx->src_queue)) {
  1111. mfc_debug(2, "No src buffers\n");
  1112. spin_unlock_irqrestore(&dev->irqlock, flags);
  1113. return -EAGAIN;
  1114. }
  1115. /* Get the next source buffer */
  1116. temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
  1117. temp_vb->flags |= MFC_BUF_FLAG_USED;
  1118. s5p_mfc_set_dec_stream_buffer(ctx,
  1119. vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), ctx->consumed_stream,
  1120. temp_vb->b->v4l2_planes[0].bytesused);
  1121. spin_unlock_irqrestore(&dev->irqlock, flags);
  1122. index = temp_vb->b->v4l2_buf.index;
  1123. dev->curr_ctx = ctx->num;
  1124. s5p_mfc_clean_ctx_int_flags(ctx);
  1125. if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
  1126. last_frame = MFC_DEC_LAST_FRAME;
  1127. mfc_debug(2, "Setting ctx->state to FINISHING\n");
  1128. ctx->state = MFCINST_FINISHING;
  1129. }
  1130. s5p_mfc_decode_one_frame(ctx, last_frame);
  1131. return 0;
  1132. }
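/*
 * Editor's note: a source buffer queued with bytesused == 0 acts as the
 * end-of-stream marker here: it flips the context into MFCINST_FINISHING
 * and issues MFC_DEC_LAST_FRAME, which asks the hardware to return the
 * frames it still holds in the DPB instead of waiting for more bitstream.
 */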
  1133. static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
  1134. {
  1135. struct s5p_mfc_dev *dev = ctx->dev;
  1136. unsigned long flags;
  1137. struct s5p_mfc_buf *dst_mb;
  1138. struct s5p_mfc_buf *src_mb;
  1139. unsigned long src_y_addr, src_c_addr, dst_addr;
  1140. unsigned int dst_size;
  1141. spin_lock_irqsave(&dev->irqlock, flags);
  1142. if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
  1143. mfc_debug(2, "no src buffers\n");
  1144. spin_unlock_irqrestore(&dev->irqlock, flags);
  1145. return -EAGAIN;
  1146. }
  1147. if (list_empty(&ctx->dst_queue)) {
  1148. mfc_debug(2, "no dst buffers\n");
  1149. spin_unlock_irqrestore(&dev->irqlock, flags);
  1150. return -EAGAIN;
  1151. }
  1152. if (list_empty(&ctx->src_queue)) {
  1153. /* send null frame */
  1154. s5p_mfc_set_enc_frame_buffer(ctx, dev->bank2, dev->bank2);
  1155. src_mb = NULL;
  1156. } else {
  1157. src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
  1158. list);
  1159. src_mb->flags |= MFC_BUF_FLAG_USED;
  1160. if (src_mb->b->v4l2_planes[0].bytesused == 0) {
  1161. /* send null frame */
  1162. s5p_mfc_set_enc_frame_buffer(ctx, dev->bank2,
  1163. dev->bank2);
  1164. ctx->state = MFCINST_FINISHING;
  1165. } else {
  1166. src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
  1167. 0);
  1168. src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
  1169. 1);
  1170. s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr,
  1171. src_c_addr);
  1172. if (src_mb->flags & MFC_BUF_FLAG_EOS)
  1173. ctx->state = MFCINST_FINISHING;
  1174. }
  1175. }
  1176. dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
  1177. dst_mb->flags |= MFC_BUF_FLAG_USED;
  1178. dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
  1179. dst_size = vb2_plane_size(dst_mb->b, 0);
  1180. s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
  1181. spin_unlock_irqrestore(&dev->irqlock, flags);
  1182. dev->curr_ctx = ctx->num;
  1183. s5p_mfc_clean_ctx_int_flags(ctx);
  1184. mfc_debug(2, "encoding buffer with index=%d state=%d\n",
  1185. src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
  1186. s5p_mfc_encode_one_frame(ctx);
  1187. return 0;
  1188. }
  1189. static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
  1190. {
  1191. struct s5p_mfc_dev *dev = ctx->dev;
  1192. unsigned long flags;
  1193. struct s5p_mfc_buf *temp_vb;
  1194. /* Initializing decoding - parsing header */
  1195. spin_lock_irqsave(&dev->irqlock, flags);
  1196. mfc_debug(2, "Preparing to init decoding\n");
  1197. temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
  1198. s5p_mfc_set_dec_desc_buffer(ctx);
  1199. mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
  1200. s5p_mfc_set_dec_stream_buffer(ctx,
  1201. vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
  1202. 0, temp_vb->b->v4l2_planes[0].bytesused);
  1203. spin_unlock_irqrestore(&dev->irqlock, flags);
  1204. dev->curr_ctx = ctx->num;
  1205. s5p_mfc_clean_ctx_int_flags(ctx);
  1206. s5p_mfc_init_decode(ctx);
  1207. }
  1208. static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
  1209. {
  1210. struct s5p_mfc_dev *dev = ctx->dev;
  1211. unsigned long flags;
  1212. struct s5p_mfc_buf *dst_mb;
  1213. unsigned long dst_addr;
  1214. unsigned int dst_size;
  1215. s5p_mfc_set_enc_ref_buffer(ctx);
  1216. spin_lock_irqsave(&dev->irqlock, flags);
  1217. dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
  1218. dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
  1219. dst_size = vb2_plane_size(dst_mb->b, 0);
  1220. s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
  1221. spin_unlock_irqrestore(&dev->irqlock, flags);
  1222. dev->curr_ctx = ctx->num;
  1223. s5p_mfc_clean_ctx_int_flags(ctx);
  1224. s5p_mfc_init_encode(ctx);
  1225. }
  1226. static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
  1227. {
  1228. struct s5p_mfc_dev *dev = ctx->dev;
  1229. unsigned long flags;
  1230. struct s5p_mfc_buf *temp_vb;
  1231. int ret;
  1232. /*
  1233. * Header was parsed, now start processing.
  1234. * First set the output frame buffers.
  1235. */
  1236. if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
  1237. mfc_err("It seems that not all destination buffers were "
  1238. "mmapped\nMFC requires that all destination buffers are "
  1239. "mmapped before starting processing\n");
  1240. return -EAGAIN;
  1241. }
  1242. spin_lock_irqsave(&dev->irqlock, flags);
  1243. if (list_empty(&ctx->src_queue)) {
  1244. mfc_err("Header has been deallocated in the middle of"
  1245. " initialization\n");
  1246. spin_unlock_irqrestore(&dev->irqlock, flags);
  1247. return -EIO;
  1248. }
  1249. temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
  1250. mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
  1251. s5p_mfc_set_dec_stream_buffer(ctx,
  1252. vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
  1253. 0, temp_vb->b->v4l2_planes[0].bytesused);
  1254. spin_unlock_irqrestore(&dev->irqlock, flags);
  1255. dev->curr_ctx = ctx->num;
  1256. s5p_mfc_clean_ctx_int_flags(ctx);
  1257. ret = s5p_mfc_set_dec_frame_buffer(ctx);
  1258. if (ret) {
  1259. mfc_err("Failed to alloc frame mem\n");
  1260. ctx->state = MFCINST_ERROR;
  1261. }
  1262. return ret;
  1263. }
  1264. /* Try running an operation on hardware */
  1265. void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
  1266. {
  1267. struct s5p_mfc_ctx *ctx;
  1268. int new_ctx;
  1269. unsigned int ret = 0;
  1270. if (test_bit(0, &dev->enter_suspend)) {
  1271. mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
  1272. return;
  1273. }
  1274. /* Check whether hardware is not running */
  1275. if (test_and_set_bit(0, &dev->hw_lock) != 0) {
  1276. /* This is perfectly ok, the scheduled ctx should wait */
  1277. mfc_debug(1, "Couldn't lock HW\n");
  1278. return;
  1279. }
  1280. /* Choose the context to run */
  1281. new_ctx = s5p_mfc_get_new_ctx(dev);
  1282. if (new_ctx < 0) {
  1283. /* No contexts to run */
  1284. if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
  1285. mfc_err("Failed to unlock hardware\n");
  1286. return;
  1287. }
  1288. mfc_debug(1, "No ctx is scheduled to be run\n");
  1289. return;
  1290. }
  1291. ctx = dev->ctx[new_ctx];
  1292. /* Got context to run in ctx */
  1293. /*
  1294. * Last frame has already been sent to MFC.
  1295. * Now obtaining frames from MFC buffer
  1296. */
  1297. s5p_mfc_clock_on();
  1298. if (ctx->type == MFCINST_DECODER) {
  1299. s5p_mfc_set_dec_desc_buffer(ctx);
  1300. switch (ctx->state) {
  1301. case MFCINST_FINISHING:
  1302. s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
  1303. break;
  1304. case MFCINST_RUNNING:
  1305. ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
  1306. break;
  1307. case MFCINST_INIT:
  1308. s5p_mfc_clean_ctx_int_flags(ctx);
  1309. ret = s5p_mfc_open_inst_cmd(ctx);
  1310. break;
  1311. case MFCINST_RETURN_INST:
  1312. s5p_mfc_clean_ctx_int_flags(ctx);
  1313. ret = s5p_mfc_close_inst_cmd(ctx);
  1314. break;
  1315. case MFCINST_GOT_INST:
  1316. s5p_mfc_run_init_dec(ctx);
  1317. break;
  1318. case MFCINST_HEAD_PARSED:
  1319. ret = s5p_mfc_run_init_dec_buffers(ctx);
  1320. mfc_debug(1, "head parsed\n");
  1321. break;
  1322. case MFCINST_RES_CHANGE_INIT:
  1323. s5p_mfc_run_res_change(ctx);
  1324. break;
  1325. case MFCINST_RES_CHANGE_FLUSH:
  1326. s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
  1327. break;
  1328. case MFCINST_RES_CHANGE_END:
  1329. mfc_debug(2, "Finished remaining frames after resolution change\n");
  1330. ctx->capture_state = QUEUE_FREE;
  1331. mfc_debug(2, "Will re-init the codec\n");
  1332. s5p_mfc_run_init_dec(ctx);
  1333. break;
  1334. default:
  1335. ret = -EAGAIN;
  1336. }
  1337. } else if (ctx->type == MFCINST_ENCODER) {
  1338. switch (ctx->state) {
  1339. case MFCINST_FINISHING:
  1340. case MFCINST_RUNNING:
  1341. ret = s5p_mfc_run_enc_frame(ctx);
  1342. break;
  1343. case MFCINST_INIT:
  1344. s5p_mfc_clean_ctx_int_flags(ctx);
  1345. ret = s5p_mfc_open_inst_cmd(ctx);
  1346. break;
  1347. case MFCINST_RETURN_INST:
  1348. s5p_mfc_clean_ctx_int_flags(ctx);
  1349. ret = s5p_mfc_close_inst_cmd(ctx);
  1350. break;
  1351. case MFCINST_GOT_INST:
  1352. s5p_mfc_run_init_enc(ctx);
  1353. break;
  1354. default:
  1355. ret = -EAGAIN;
  1356. }
  1357. } else {
  1358. mfc_err("Invalid context type: %d\n", ctx->type);
  1359. ret = -EAGAIN;
  1360. }
  1361. if (ret) {
  1362. /* Free hardware lock */
  1363. if (test_and_clear_bit(0, &dev->hw_lock) == 0)
  1364. mfc_err("Failed to unlock hardware\n");
  1365. /* This is indeed important: since no operation has been
  1366. * scheduled, drop the clock reference here, as nobody else
  1367. * will ever do it - no interrupt related to this try_run
  1368. * will ever come from the hardware. */
  1369. s5p_mfc_clock_off();
  1370. }
  1371. }
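/*
 * Editor's note on the protocol above: bit 0 of dev->hw_lock is a simple
 * "hardware busy" flag claimed with test_and_set_bit(). On the success path
 * the clock reference taken by s5p_mfc_clock_on() and the lock itself are
 * released later, from the interrupt path, once the scheduled command
 * completes; only when scheduling fails synchronously (ret != 0) does
 * try_run drop both here, because no completion interrupt will ever arrive
 * for the aborted attempt.
 */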
  1372. void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
  1373. {
  1374. struct s5p_mfc_buf *b;
  1375. int i;
  1376. while (!list_empty(lh)) {
  1377. b = list_entry(lh->next, struct s5p_mfc_buf, list);
  1378. for (i = 0; i < b->b->num_planes; i++)
  1379. vb2_set_plane_payload(b->b, i, 0);
  1380. vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
  1381. list_del(&b->list);
  1382. }
  1383. }