exynos_drm_fimc.c 48 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955
  1. /*
  2. * Copyright (C) 2012 Samsung Electronics Co.Ltd
  3. * Authors:
  4. * Eunchul Kim <chulspro.kim@samsung.com>
  5. * Jinyoung Jeon <jy0.jeon@samsung.com>
  6. * Sangmin Lee <lsmin.lee@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. *
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/clk.h>
  18. #include <linux/pm_runtime.h>
  19. #include <plat/map-base.h>
  20. #include <drm/drmP.h>
  21. #include <drm/exynos_drm.h>
  22. #include "regs-fimc.h"
  23. #include "exynos_drm_ipp.h"
  24. #include "exynos_drm_fimc.h"
  25. /*
  26. * FIMC stands for Fully Interactive Mobile Camera and
  27. * supports image scaler/rotator and input/output DMA operations.
  28. * input DMA reads image data from the memory.
  29. * output DMA writes image data to memory.
  30. * FIMC supports image rotation and image effect functions.
  31. *
  32. * M2M operation : supports crop/scale/rotation/csc so on.
  33. * Memory ----> FIMC H/W ----> Memory.
  34. * Writeback operation : supports cloned screen with FIMD.
  35. * FIMD ----> FIMC H/W ----> Memory.
  36. * Output operation : supports direct display using local path.
  37. * Memory ----> FIMC H/W ----> FIMD.
  38. */
  39. /*
  40. * TODO
  41. * 1. check suspend/resume api if needed.
  42. * 2. need to check use case platform_device_id.
  43. * 3. check src/dst size with, height.
  44. * 4. added check_prepare api for right register.
  45. * 5. need to add supported list in prop_list.
  46. * 6. check prescaler/scaler optimization.
  47. */
  48. #define FIMC_MAX_DEVS 4
  49. #define FIMC_MAX_SRC 2
  50. #define FIMC_MAX_DST 32
  51. #define FIMC_SHFACTOR 10
  52. #define FIMC_BUF_STOP 1
  53. #define FIMC_BUF_START 2
  54. #define FIMC_REG_SZ 32
  55. #define FIMC_WIDTH_ITU_709 1280
  56. #define FIMC_REFRESH_MAX 60
  57. #define FIMC_REFRESH_MIN 12
  58. #define FIMC_CROP_MAX 8192
  59. #define FIMC_CROP_MIN 32
  60. #define FIMC_SCALE_MAX 4224
  61. #define FIMC_SCALE_MIN 32
  62. #define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
  63. #define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
  64. struct fimc_context, ippdrv);
  65. #define fimc_read(offset) readl(ctx->regs + (offset))
  66. #define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
  67. enum fimc_wb {
  68. FIMC_WB_NONE,
  69. FIMC_WB_A,
  70. FIMC_WB_B,
  71. };
  72. /*
  73. * A structure of scaler.
  74. *
  75. * @range: narrow, wide.
  76. * @bypass: unused scaler path.
  77. * @up_h: horizontal scale up.
  78. * @up_v: vertical scale up.
  79. * @hratio: horizontal ratio.
  80. * @vratio: vertical ratio.
  81. */
  82. struct fimc_scaler {
  83. bool range;
  84. bool bypass;
  85. bool up_h;
  86. bool up_v;
  87. u32 hratio;
  88. u32 vratio;
  89. };
  90. /*
  91. * A structure of scaler capability.
  92. *
  93. * find user manual table 43-1.
  94. * @in_hori: scaler input horizontal size.
  95. * @bypass: scaler bypass mode.
  96. * @dst_h_wo_rot: target horizontal size without output rotation.
  97. * @dst_h_rot: target horizontal size with output rotation.
  98. * @rl_w_wo_rot: real width without input rotation.
  99. * @rl_h_rot: real height without output rotation.
  100. */
  101. struct fimc_capability {
  102. /* scaler */
  103. u32 in_hori;
  104. u32 bypass;
  105. /* output rotator */
  106. u32 dst_h_wo_rot;
  107. u32 dst_h_rot;
  108. /* input rotator */
  109. u32 rl_w_wo_rot;
  110. u32 rl_h_rot;
  111. };
  112. /*
  113. * A structure of fimc driver data.
  114. *
  115. * @parent_clk: name of parent clock.
  116. */
  117. struct fimc_driverdata {
  118. char *parent_clk;
  119. };
  120. /*
  121. * A structure of fimc context.
  122. *
  123. * @ippdrv: prepare initialization using ippdrv.
  124. * @regs_res: register resources.
  125. * @regs: memory mapped io registers.
  126. * @lock: locking of operations.
  127. * @sclk_fimc_clk: fimc source clock.
  128. * @fimc_clk: fimc clock.
  129. * @wb_clk: writeback a clock.
  130. * @wb_b_clk: writeback b clock.
  131. * @sc: scaler infomations.
  132. * @odr: ordering of YUV.
  133. * @ver: fimc version.
  134. * @pol: porarity of writeback.
  135. * @id: fimc id.
  136. * @irq: irq number.
  137. * @suspended: qos operations.
  138. */
  139. struct fimc_context {
  140. struct exynos_drm_ippdrv ippdrv;
  141. struct resource *regs_res;
  142. void __iomem *regs;
  143. struct mutex lock;
  144. struct clk *sclk_fimc_clk;
  145. struct clk *fimc_clk;
  146. struct clk *wb_clk;
  147. struct clk *wb_b_clk;
  148. struct fimc_scaler sc;
  149. struct fimc_driverdata *ddata;
  150. struct exynos_drm_ipp_pol pol;
  151. int id;
  152. int irq;
  153. bool suspended;
  154. };
  155. static void fimc_sw_reset(struct fimc_context *ctx)
  156. {
  157. u32 cfg;
  158. DRM_DEBUG_KMS("%s\n", __func__);
  159. /* stop dma operation */
  160. cfg = fimc_read(EXYNOS_CISTATUS);
  161. if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
  162. cfg = fimc_read(EXYNOS_MSCTRL);
  163. cfg &= ~EXYNOS_MSCTRL_ENVID;
  164. fimc_write(cfg, EXYNOS_MSCTRL);
  165. }
  166. cfg = fimc_read(EXYNOS_CISRCFMT);
  167. cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
  168. fimc_write(cfg, EXYNOS_CISRCFMT);
  169. /* disable image capture */
  170. cfg = fimc_read(EXYNOS_CIIMGCPT);
  171. cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
  172. fimc_write(cfg, EXYNOS_CIIMGCPT);
  173. /* s/w reset */
  174. cfg = fimc_read(EXYNOS_CIGCTRL);
  175. cfg |= (EXYNOS_CIGCTRL_SWRST);
  176. fimc_write(cfg, EXYNOS_CIGCTRL);
  177. /* s/w reset complete */
  178. cfg = fimc_read(EXYNOS_CIGCTRL);
  179. cfg &= ~EXYNOS_CIGCTRL_SWRST;
  180. fimc_write(cfg, EXYNOS_CIGCTRL);
  181. /* reset sequence */
  182. fimc_write(0x0, EXYNOS_CIFCNTSEQ);
  183. }
  184. static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
  185. {
  186. u32 camblk_cfg;
  187. DRM_DEBUG_KMS("%s\n", __func__);
  188. camblk_cfg = readl(SYSREG_CAMERA_BLK);
  189. camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
  190. camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
  191. writel(camblk_cfg, SYSREG_CAMERA_BLK);
  192. }
  193. static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
  194. {
  195. u32 cfg;
  196. DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
  197. cfg = fimc_read(EXYNOS_CIGCTRL);
  198. cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
  199. EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
  200. EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
  201. EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
  202. EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
  203. EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
  204. switch (wb) {
  205. case FIMC_WB_A:
  206. cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
  207. EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
  208. break;
  209. case FIMC_WB_B:
  210. cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
  211. EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
  212. break;
  213. case FIMC_WB_NONE:
  214. default:
  215. cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
  216. EXYNOS_CIGCTRL_SELWRITEBACK_A |
  217. EXYNOS_CIGCTRL_SELCAM_MIPI_A |
  218. EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
  219. break;
  220. }
  221. fimc_write(cfg, EXYNOS_CIGCTRL);
  222. }
  223. static void fimc_set_polarity(struct fimc_context *ctx,
  224. struct exynos_drm_ipp_pol *pol)
  225. {
  226. u32 cfg;
  227. DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
  228. __func__, pol->inv_pclk, pol->inv_vsync);
  229. DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
  230. __func__, pol->inv_href, pol->inv_hsync);
  231. cfg = fimc_read(EXYNOS_CIGCTRL);
  232. cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
  233. EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
  234. if (pol->inv_pclk)
  235. cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
  236. if (pol->inv_vsync)
  237. cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
  238. if (pol->inv_href)
  239. cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
  240. if (pol->inv_hsync)
  241. cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
  242. fimc_write(cfg, EXYNOS_CIGCTRL);
  243. }
  244. static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
  245. {
  246. u32 cfg;
  247. DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
  248. cfg = fimc_read(EXYNOS_CIGCTRL);
  249. if (enable)
  250. cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
  251. else
  252. cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
  253. fimc_write(cfg, EXYNOS_CIGCTRL);
  254. }
  255. static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
  256. bool overflow, bool level)
  257. {
  258. u32 cfg;
  259. DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
  260. enable, overflow, level);
  261. cfg = fimc_read(EXYNOS_CIGCTRL);
  262. if (enable) {
  263. cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
  264. cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
  265. if (overflow)
  266. cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
  267. if (level)
  268. cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
  269. } else
  270. cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
  271. fimc_write(cfg, EXYNOS_CIGCTRL);
  272. }
  273. static void fimc_clear_irq(struct fimc_context *ctx)
  274. {
  275. u32 cfg;
  276. DRM_DEBUG_KMS("%s\n", __func__);
  277. cfg = fimc_read(EXYNOS_CIGCTRL);
  278. cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
  279. fimc_write(cfg, EXYNOS_CIGCTRL);
  280. }
  281. static bool fimc_check_ovf(struct fimc_context *ctx)
  282. {
  283. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  284. u32 cfg, status, flag;
  285. status = fimc_read(EXYNOS_CISTATUS);
  286. flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
  287. EXYNOS_CISTATUS_OVFICR;
  288. DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
  289. if (status & flag) {
  290. cfg = fimc_read(EXYNOS_CIWDOFST);
  291. cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
  292. EXYNOS_CIWDOFST_CLROVFICR);
  293. fimc_write(cfg, EXYNOS_CIWDOFST);
  294. cfg = fimc_read(EXYNOS_CIWDOFST);
  295. cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
  296. EXYNOS_CIWDOFST_CLROVFICR);
  297. fimc_write(cfg, EXYNOS_CIWDOFST);
  298. dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
  299. ctx->id, status);
  300. return true;
  301. }
  302. return false;
  303. }
  304. static bool fimc_check_frame_end(struct fimc_context *ctx)
  305. {
  306. u32 cfg;
  307. cfg = fimc_read(EXYNOS_CISTATUS);
  308. DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
  309. if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
  310. return false;
  311. cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
  312. fimc_write(cfg, EXYNOS_CISTATUS);
  313. return true;
  314. }
  315. static int fimc_get_buf_id(struct fimc_context *ctx)
  316. {
  317. u32 cfg;
  318. int frame_cnt, buf_id;
  319. DRM_DEBUG_KMS("%s\n", __func__);
  320. cfg = fimc_read(EXYNOS_CISTATUS2);
  321. frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
  322. if (frame_cnt == 0)
  323. frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
  324. DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
  325. EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
  326. EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
  327. if (frame_cnt == 0) {
  328. DRM_ERROR("failed to get frame count.\n");
  329. return -EIO;
  330. }
  331. buf_id = frame_cnt - 1;
  332. DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
  333. return buf_id;
  334. }
  335. static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
  336. {
  337. u32 cfg;
  338. DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
  339. cfg = fimc_read(EXYNOS_CIOCTRL);
  340. if (enable)
  341. cfg |= EXYNOS_CIOCTRL_LASTENDEN;
  342. else
  343. cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
  344. fimc_write(cfg, EXYNOS_CIOCTRL);
  345. }
  346. static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
  347. {
  348. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  349. u32 cfg;
  350. DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
  351. /* RGB */
  352. cfg = fimc_read(EXYNOS_CISCCTRL);
  353. cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
  354. switch (fmt) {
  355. case DRM_FORMAT_RGB565:
  356. cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
  357. fimc_write(cfg, EXYNOS_CISCCTRL);
  358. return 0;
  359. case DRM_FORMAT_RGB888:
  360. case DRM_FORMAT_XRGB8888:
  361. cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
  362. fimc_write(cfg, EXYNOS_CISCCTRL);
  363. return 0;
  364. default:
  365. /* bypass */
  366. break;
  367. }
  368. /* YUV */
  369. cfg = fimc_read(EXYNOS_MSCTRL);
  370. cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
  371. EXYNOS_MSCTRL_C_INT_IN_2PLANE |
  372. EXYNOS_MSCTRL_ORDER422_YCBYCR);
  373. switch (fmt) {
  374. case DRM_FORMAT_YUYV:
  375. cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
  376. break;
  377. case DRM_FORMAT_YVYU:
  378. cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
  379. break;
  380. case DRM_FORMAT_UYVY:
  381. cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
  382. break;
  383. case DRM_FORMAT_VYUY:
  384. case DRM_FORMAT_YUV444:
  385. cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
  386. break;
  387. case DRM_FORMAT_NV21:
  388. case DRM_FORMAT_NV61:
  389. cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
  390. EXYNOS_MSCTRL_C_INT_IN_2PLANE);
  391. break;
  392. case DRM_FORMAT_YUV422:
  393. case DRM_FORMAT_YUV420:
  394. case DRM_FORMAT_YVU420:
  395. cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
  396. break;
  397. case DRM_FORMAT_NV12:
  398. case DRM_FORMAT_NV12MT:
  399. case DRM_FORMAT_NV16:
  400. cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
  401. EXYNOS_MSCTRL_C_INT_IN_2PLANE);
  402. break;
  403. default:
  404. dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
  405. return -EINVAL;
  406. }
  407. fimc_write(cfg, EXYNOS_MSCTRL);
  408. return 0;
  409. }
  410. static int fimc_src_set_fmt(struct device *dev, u32 fmt)
  411. {
  412. struct fimc_context *ctx = get_fimc_context(dev);
  413. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  414. u32 cfg;
  415. DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
  416. cfg = fimc_read(EXYNOS_MSCTRL);
  417. cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
  418. switch (fmt) {
  419. case DRM_FORMAT_RGB565:
  420. case DRM_FORMAT_RGB888:
  421. case DRM_FORMAT_XRGB8888:
  422. cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
  423. break;
  424. case DRM_FORMAT_YUV444:
  425. cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
  426. break;
  427. case DRM_FORMAT_YUYV:
  428. case DRM_FORMAT_YVYU:
  429. case DRM_FORMAT_UYVY:
  430. case DRM_FORMAT_VYUY:
  431. cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
  432. break;
  433. case DRM_FORMAT_NV16:
  434. case DRM_FORMAT_NV61:
  435. case DRM_FORMAT_YUV422:
  436. cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
  437. break;
  438. case DRM_FORMAT_YUV420:
  439. case DRM_FORMAT_YVU420:
  440. case DRM_FORMAT_NV12:
  441. case DRM_FORMAT_NV21:
  442. case DRM_FORMAT_NV12MT:
  443. cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
  444. break;
  445. default:
  446. dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
  447. return -EINVAL;
  448. }
  449. fimc_write(cfg, EXYNOS_MSCTRL);
  450. cfg = fimc_read(EXYNOS_CIDMAPARAM);
  451. cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
  452. if (fmt == DRM_FORMAT_NV12MT)
  453. cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
  454. else
  455. cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
  456. fimc_write(cfg, EXYNOS_CIDMAPARAM);
  457. return fimc_src_set_fmt_order(ctx, fmt);
  458. }
  459. static int fimc_src_set_transf(struct device *dev,
  460. enum drm_exynos_degree degree,
  461. enum drm_exynos_flip flip, bool *swap)
  462. {
  463. struct fimc_context *ctx = get_fimc_context(dev);
  464. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  465. u32 cfg1, cfg2;
  466. DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
  467. degree, flip);
  468. cfg1 = fimc_read(EXYNOS_MSCTRL);
  469. cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
  470. EXYNOS_MSCTRL_FLIP_Y_MIRROR);
  471. cfg2 = fimc_read(EXYNOS_CITRGFMT);
  472. cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
  473. switch (degree) {
  474. case EXYNOS_DRM_DEGREE_0:
  475. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  476. cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
  477. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  478. cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
  479. break;
  480. case EXYNOS_DRM_DEGREE_90:
  481. cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
  482. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  483. cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
  484. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  485. cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
  486. break;
  487. case EXYNOS_DRM_DEGREE_180:
  488. cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
  489. EXYNOS_MSCTRL_FLIP_Y_MIRROR);
  490. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  491. cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
  492. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  493. cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
  494. break;
  495. case EXYNOS_DRM_DEGREE_270:
  496. cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
  497. EXYNOS_MSCTRL_FLIP_Y_MIRROR);
  498. cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
  499. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  500. cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
  501. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  502. cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
  503. break;
  504. default:
  505. dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
  506. return -EINVAL;
  507. }
  508. fimc_write(cfg1, EXYNOS_MSCTRL);
  509. fimc_write(cfg2, EXYNOS_CITRGFMT);
  510. *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
  511. return 0;
  512. }
  513. static int fimc_set_window(struct fimc_context *ctx,
  514. struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
  515. {
  516. u32 cfg, h1, h2, v1, v2;
  517. /* cropped image */
  518. h1 = pos->x;
  519. h2 = sz->hsize - pos->w - pos->x;
  520. v1 = pos->y;
  521. v2 = sz->vsize - pos->h - pos->y;
  522. DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
  523. __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
  524. DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
  525. h1, h2, v1, v2);
  526. /*
  527. * set window offset 1, 2 size
  528. * check figure 43-21 in user manual
  529. */
  530. cfg = fimc_read(EXYNOS_CIWDOFST);
  531. cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
  532. EXYNOS_CIWDOFST_WINVEROFST_MASK);
  533. cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
  534. EXYNOS_CIWDOFST_WINVEROFST(v1));
  535. cfg |= EXYNOS_CIWDOFST_WINOFSEN;
  536. fimc_write(cfg, EXYNOS_CIWDOFST);
  537. cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
  538. EXYNOS_CIWDOFST2_WINVEROFST2(v2));
  539. fimc_write(cfg, EXYNOS_CIWDOFST2);
  540. return 0;
  541. }
  542. static int fimc_src_set_size(struct device *dev, int swap,
  543. struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
  544. {
  545. struct fimc_context *ctx = get_fimc_context(dev);
  546. struct drm_exynos_pos img_pos = *pos;
  547. struct drm_exynos_sz img_sz = *sz;
  548. u32 cfg;
  549. DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
  550. __func__, swap, sz->hsize, sz->vsize);
  551. /* original size */
  552. cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
  553. EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
  554. fimc_write(cfg, EXYNOS_ORGISIZE);
  555. DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
  556. pos->x, pos->y, pos->w, pos->h);
  557. if (swap) {
  558. img_pos.w = pos->h;
  559. img_pos.h = pos->w;
  560. img_sz.hsize = sz->vsize;
  561. img_sz.vsize = sz->hsize;
  562. }
  563. /* set input DMA image size */
  564. cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
  565. cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
  566. EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
  567. cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
  568. EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
  569. fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
  570. /*
  571. * set input FIFO image size
  572. * for now, we support only ITU601 8 bit mode
  573. */
  574. cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
  575. EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
  576. EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
  577. fimc_write(cfg, EXYNOS_CISRCFMT);
  578. /* offset Y(RGB), Cb, Cr */
  579. cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
  580. EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
  581. fimc_write(cfg, EXYNOS_CIIYOFF);
  582. cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
  583. EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
  584. fimc_write(cfg, EXYNOS_CIICBOFF);
  585. cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
  586. EXYNOS_CIICROFF_VERTICAL(img_pos.y));
  587. fimc_write(cfg, EXYNOS_CIICROFF);
  588. return fimc_set_window(ctx, &img_pos, &img_sz);
  589. }
  590. static int fimc_src_set_addr(struct device *dev,
  591. struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
  592. enum drm_exynos_ipp_buf_type buf_type)
  593. {
  594. struct fimc_context *ctx = get_fimc_context(dev);
  595. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  596. struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
  597. struct drm_exynos_ipp_property *property;
  598. struct drm_exynos_ipp_config *config;
  599. if (!c_node) {
  600. DRM_ERROR("failed to get c_node.\n");
  601. return -EINVAL;
  602. }
  603. property = &c_node->property;
  604. DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
  605. property->prop_id, buf_id, buf_type);
  606. if (buf_id > FIMC_MAX_SRC) {
  607. dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
  608. return -ENOMEM;
  609. }
  610. /* address register set */
  611. switch (buf_type) {
  612. case IPP_BUF_ENQUEUE:
  613. config = &property->config[EXYNOS_DRM_OPS_SRC];
  614. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
  615. EXYNOS_CIIYSA(buf_id));
  616. if (config->fmt == DRM_FORMAT_YVU420) {
  617. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
  618. EXYNOS_CIICBSA(buf_id));
  619. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
  620. EXYNOS_CIICRSA(buf_id));
  621. } else {
  622. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
  623. EXYNOS_CIICBSA(buf_id));
  624. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
  625. EXYNOS_CIICRSA(buf_id));
  626. }
  627. break;
  628. case IPP_BUF_DEQUEUE:
  629. fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
  630. fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
  631. fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
  632. break;
  633. default:
  634. /* bypass */
  635. break;
  636. }
  637. return 0;
  638. }
  639. static struct exynos_drm_ipp_ops fimc_src_ops = {
  640. .set_fmt = fimc_src_set_fmt,
  641. .set_transf = fimc_src_set_transf,
  642. .set_size = fimc_src_set_size,
  643. .set_addr = fimc_src_set_addr,
  644. };
  645. static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
  646. {
  647. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  648. u32 cfg;
  649. DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
  650. /* RGB */
  651. cfg = fimc_read(EXYNOS_CISCCTRL);
  652. cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
  653. switch (fmt) {
  654. case DRM_FORMAT_RGB565:
  655. cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
  656. fimc_write(cfg, EXYNOS_CISCCTRL);
  657. return 0;
  658. case DRM_FORMAT_RGB888:
  659. cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
  660. fimc_write(cfg, EXYNOS_CISCCTRL);
  661. return 0;
  662. case DRM_FORMAT_XRGB8888:
  663. cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
  664. EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
  665. fimc_write(cfg, EXYNOS_CISCCTRL);
  666. break;
  667. default:
  668. /* bypass */
  669. break;
  670. }
  671. /* YUV */
  672. cfg = fimc_read(EXYNOS_CIOCTRL);
  673. cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
  674. EXYNOS_CIOCTRL_ORDER422_MASK |
  675. EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
  676. switch (fmt) {
  677. case DRM_FORMAT_XRGB8888:
  678. cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
  679. break;
  680. case DRM_FORMAT_YUYV:
  681. cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
  682. break;
  683. case DRM_FORMAT_YVYU:
  684. cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
  685. break;
  686. case DRM_FORMAT_UYVY:
  687. cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
  688. break;
  689. case DRM_FORMAT_VYUY:
  690. cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
  691. break;
  692. case DRM_FORMAT_NV21:
  693. case DRM_FORMAT_NV61:
  694. cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
  695. cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
  696. break;
  697. case DRM_FORMAT_YUV422:
  698. case DRM_FORMAT_YUV420:
  699. case DRM_FORMAT_YVU420:
  700. cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
  701. break;
  702. case DRM_FORMAT_NV12:
  703. case DRM_FORMAT_NV12MT:
  704. case DRM_FORMAT_NV16:
  705. cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
  706. cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
  707. break;
  708. default:
  709. dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
  710. return -EINVAL;
  711. }
  712. fimc_write(cfg, EXYNOS_CIOCTRL);
  713. return 0;
  714. }
  715. static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
  716. {
  717. struct fimc_context *ctx = get_fimc_context(dev);
  718. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  719. u32 cfg;
  720. DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
  721. cfg = fimc_read(EXYNOS_CIEXTEN);
  722. if (fmt == DRM_FORMAT_AYUV) {
  723. cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
  724. fimc_write(cfg, EXYNOS_CIEXTEN);
  725. } else {
  726. cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
  727. fimc_write(cfg, EXYNOS_CIEXTEN);
  728. cfg = fimc_read(EXYNOS_CITRGFMT);
  729. cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
  730. switch (fmt) {
  731. case DRM_FORMAT_RGB565:
  732. case DRM_FORMAT_RGB888:
  733. case DRM_FORMAT_XRGB8888:
  734. cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
  735. break;
  736. case DRM_FORMAT_YUYV:
  737. case DRM_FORMAT_YVYU:
  738. case DRM_FORMAT_UYVY:
  739. case DRM_FORMAT_VYUY:
  740. cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
  741. break;
  742. case DRM_FORMAT_NV16:
  743. case DRM_FORMAT_NV61:
  744. case DRM_FORMAT_YUV422:
  745. cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
  746. break;
  747. case DRM_FORMAT_YUV420:
  748. case DRM_FORMAT_YVU420:
  749. case DRM_FORMAT_NV12:
  750. case DRM_FORMAT_NV12MT:
  751. case DRM_FORMAT_NV21:
  752. cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
  753. break;
  754. default:
  755. dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
  756. fmt);
  757. return -EINVAL;
  758. }
  759. fimc_write(cfg, EXYNOS_CITRGFMT);
  760. }
  761. cfg = fimc_read(EXYNOS_CIDMAPARAM);
  762. cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
  763. if (fmt == DRM_FORMAT_NV12MT)
  764. cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
  765. else
  766. cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
  767. fimc_write(cfg, EXYNOS_CIDMAPARAM);
  768. return fimc_dst_set_fmt_order(ctx, fmt);
  769. }
  770. static int fimc_dst_set_transf(struct device *dev,
  771. enum drm_exynos_degree degree,
  772. enum drm_exynos_flip flip, bool *swap)
  773. {
  774. struct fimc_context *ctx = get_fimc_context(dev);
  775. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  776. u32 cfg;
  777. DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
  778. degree, flip);
  779. cfg = fimc_read(EXYNOS_CITRGFMT);
  780. cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
  781. cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
  782. switch (degree) {
  783. case EXYNOS_DRM_DEGREE_0:
  784. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  785. cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
  786. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  787. cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
  788. break;
  789. case EXYNOS_DRM_DEGREE_90:
  790. cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
  791. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  792. cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
  793. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  794. cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
  795. break;
  796. case EXYNOS_DRM_DEGREE_180:
  797. cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
  798. EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
  799. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  800. cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
  801. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  802. cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
  803. break;
  804. case EXYNOS_DRM_DEGREE_270:
  805. cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
  806. EXYNOS_CITRGFMT_FLIP_X_MIRROR |
  807. EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
  808. if (flip & EXYNOS_DRM_FLIP_VERTICAL)
  809. cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
  810. if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
  811. cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
  812. break;
  813. default:
  814. dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
  815. return -EINVAL;
  816. }
  817. fimc_write(cfg, EXYNOS_CITRGFMT);
  818. *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
  819. return 0;
  820. }
  821. static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
  822. {
  823. DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
  824. if (src >= dst * 64) {
  825. DRM_ERROR("failed to make ratio and shift.\n");
  826. return -EINVAL;
  827. } else if (src >= dst * 32) {
  828. *ratio = 32;
  829. *shift = 5;
  830. } else if (src >= dst * 16) {
  831. *ratio = 16;
  832. *shift = 4;
  833. } else if (src >= dst * 8) {
  834. *ratio = 8;
  835. *shift = 3;
  836. } else if (src >= dst * 4) {
  837. *ratio = 4;
  838. *shift = 2;
  839. } else if (src >= dst * 2) {
  840. *ratio = 2;
  841. *shift = 1;
  842. } else {
  843. *ratio = 1;
  844. *shift = 0;
  845. }
  846. return 0;
  847. }
  848. static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
  849. struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
  850. {
  851. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  852. u32 cfg, cfg_ext, shfactor;
  853. u32 pre_dst_width, pre_dst_height;
  854. u32 pre_hratio, hfactor, pre_vratio, vfactor;
  855. int ret = 0;
  856. u32 src_w, src_h, dst_w, dst_h;
  857. cfg_ext = fimc_read(EXYNOS_CITRGFMT);
  858. if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
  859. src_w = src->h;
  860. src_h = src->w;
  861. } else {
  862. src_w = src->w;
  863. src_h = src->h;
  864. }
  865. if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
  866. dst_w = dst->h;
  867. dst_h = dst->w;
  868. } else {
  869. dst_w = dst->w;
  870. dst_h = dst->h;
  871. }
  872. ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
  873. if (ret) {
  874. dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
  875. return ret;
  876. }
  877. ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
  878. if (ret) {
  879. dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
  880. return ret;
  881. }
  882. pre_dst_width = src_w / pre_hratio;
  883. pre_dst_height = src_h / pre_vratio;
  884. DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
  885. pre_dst_width, pre_dst_height);
  886. DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
  887. __func__, pre_hratio, hfactor, pre_vratio, vfactor);
  888. sc->hratio = (src_w << 14) / (dst_w << hfactor);
  889. sc->vratio = (src_h << 14) / (dst_h << vfactor);
  890. sc->up_h = (dst_w >= src_w) ? true : false;
  891. sc->up_v = (dst_h >= src_h) ? true : false;
  892. DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
  893. __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
  894. shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
  895. DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
  896. cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
  897. EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
  898. EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
  899. fimc_write(cfg, EXYNOS_CISCPRERATIO);
  900. cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
  901. EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
  902. fimc_write(cfg, EXYNOS_CISCPREDST);
  903. return ret;
  904. }
  905. static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
  906. {
  907. u32 cfg, cfg_ext;
  908. DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
  909. __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
  910. DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
  911. __func__, sc->hratio, sc->vratio);
  912. cfg = fimc_read(EXYNOS_CISCCTRL);
  913. cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
  914. EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
  915. EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
  916. EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
  917. EXYNOS_CISCCTRL_CSCR2Y_WIDE |
  918. EXYNOS_CISCCTRL_CSCY2R_WIDE);
  919. if (sc->range)
  920. cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
  921. EXYNOS_CISCCTRL_CSCY2R_WIDE);
  922. if (sc->bypass)
  923. cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
  924. if (sc->up_h)
  925. cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
  926. if (sc->up_v)
  927. cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
  928. cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
  929. EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
  930. fimc_write(cfg, EXYNOS_CISCCTRL);
  931. cfg_ext = fimc_read(EXYNOS_CIEXTEN);
  932. cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
  933. cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
  934. cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
  935. EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
  936. fimc_write(cfg_ext, EXYNOS_CIEXTEN);
  937. }
  938. static int fimc_dst_set_size(struct device *dev, int swap,
  939. struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
  940. {
  941. struct fimc_context *ctx = get_fimc_context(dev);
  942. struct drm_exynos_pos img_pos = *pos;
  943. struct drm_exynos_sz img_sz = *sz;
  944. u32 cfg;
  945. DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
  946. __func__, swap, sz->hsize, sz->vsize);
  947. /* original size */
  948. cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
  949. EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
  950. fimc_write(cfg, EXYNOS_ORGOSIZE);
  951. DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
  952. __func__, pos->x, pos->y, pos->w, pos->h);
  953. /* CSC ITU */
  954. cfg = fimc_read(EXYNOS_CIGCTRL);
  955. cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
  956. if (sz->hsize >= FIMC_WIDTH_ITU_709)
  957. cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
  958. else
  959. cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
  960. fimc_write(cfg, EXYNOS_CIGCTRL);
  961. if (swap) {
  962. img_pos.w = pos->h;
  963. img_pos.h = pos->w;
  964. img_sz.hsize = sz->vsize;
  965. img_sz.vsize = sz->hsize;
  966. }
  967. /* target image size */
  968. cfg = fimc_read(EXYNOS_CITRGFMT);
  969. cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
  970. EXYNOS_CITRGFMT_TARGETV_MASK);
  971. cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
  972. EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
  973. fimc_write(cfg, EXYNOS_CITRGFMT);
  974. /* target area */
  975. cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
  976. fimc_write(cfg, EXYNOS_CITAREA);
  977. /* offset Y(RGB), Cb, Cr */
  978. cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
  979. EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
  980. fimc_write(cfg, EXYNOS_CIOYOFF);
  981. cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
  982. EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
  983. fimc_write(cfg, EXYNOS_CIOCBOFF);
  984. cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
  985. EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
  986. fimc_write(cfg, EXYNOS_CIOCROFF);
  987. return 0;
  988. }
  989. static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
  990. {
  991. u32 cfg, i, buf_num = 0;
  992. u32 mask = 0x00000001;
  993. cfg = fimc_read(EXYNOS_CIFCNTSEQ);
  994. for (i = 0; i < FIMC_REG_SZ; i++)
  995. if (cfg & (mask << i))
  996. buf_num++;
  997. DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
  998. return buf_num;
  999. }
  1000. static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
  1001. enum drm_exynos_ipp_buf_type buf_type)
  1002. {
  1003. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1004. bool enable;
  1005. u32 cfg;
  1006. u32 mask = 0x00000001 << buf_id;
  1007. int ret = 0;
  1008. DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
  1009. buf_id, buf_type);
  1010. mutex_lock(&ctx->lock);
  1011. /* mask register set */
  1012. cfg = fimc_read(EXYNOS_CIFCNTSEQ);
  1013. switch (buf_type) {
  1014. case IPP_BUF_ENQUEUE:
  1015. enable = true;
  1016. break;
  1017. case IPP_BUF_DEQUEUE:
  1018. enable = false;
  1019. break;
  1020. default:
  1021. dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
  1022. ret = -EINVAL;
  1023. goto err_unlock;
  1024. }
  1025. /* sequence id */
  1026. cfg &= ~mask;
  1027. cfg |= (enable << buf_id);
  1028. fimc_write(cfg, EXYNOS_CIFCNTSEQ);
  1029. /* interrupt enable */
  1030. if (buf_type == IPP_BUF_ENQUEUE &&
  1031. fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
  1032. fimc_handle_irq(ctx, true, false, true);
  1033. /* interrupt disable */
  1034. if (buf_type == IPP_BUF_DEQUEUE &&
  1035. fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
  1036. fimc_handle_irq(ctx, false, false, true);
  1037. err_unlock:
  1038. mutex_unlock(&ctx->lock);
  1039. return ret;
  1040. }
  1041. static int fimc_dst_set_addr(struct device *dev,
  1042. struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
  1043. enum drm_exynos_ipp_buf_type buf_type)
  1044. {
  1045. struct fimc_context *ctx = get_fimc_context(dev);
  1046. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1047. struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
  1048. struct drm_exynos_ipp_property *property;
  1049. struct drm_exynos_ipp_config *config;
  1050. if (!c_node) {
  1051. DRM_ERROR("failed to get c_node.\n");
  1052. return -EINVAL;
  1053. }
  1054. property = &c_node->property;
  1055. DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
  1056. property->prop_id, buf_id, buf_type);
  1057. if (buf_id > FIMC_MAX_DST) {
  1058. dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
  1059. return -ENOMEM;
  1060. }
  1061. /* address register set */
  1062. switch (buf_type) {
  1063. case IPP_BUF_ENQUEUE:
  1064. config = &property->config[EXYNOS_DRM_OPS_DST];
  1065. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
  1066. EXYNOS_CIOYSA(buf_id));
  1067. if (config->fmt == DRM_FORMAT_YVU420) {
  1068. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
  1069. EXYNOS_CIOCBSA(buf_id));
  1070. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
  1071. EXYNOS_CIOCRSA(buf_id));
  1072. } else {
  1073. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
  1074. EXYNOS_CIOCBSA(buf_id));
  1075. fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
  1076. EXYNOS_CIOCRSA(buf_id));
  1077. }
  1078. break;
  1079. case IPP_BUF_DEQUEUE:
  1080. fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
  1081. fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
  1082. fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
  1083. break;
  1084. default:
  1085. /* bypass */
  1086. break;
  1087. }
  1088. return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
  1089. }
  1090. static struct exynos_drm_ipp_ops fimc_dst_ops = {
  1091. .set_fmt = fimc_dst_set_fmt,
  1092. .set_transf = fimc_dst_set_transf,
  1093. .set_size = fimc_dst_set_size,
  1094. .set_addr = fimc_dst_set_addr,
  1095. };
  1096. static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
  1097. {
  1098. DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
  1099. if (enable) {
  1100. clk_enable(ctx->sclk_fimc_clk);
  1101. clk_enable(ctx->fimc_clk);
  1102. clk_enable(ctx->wb_clk);
  1103. ctx->suspended = false;
  1104. } else {
  1105. clk_disable(ctx->sclk_fimc_clk);
  1106. clk_disable(ctx->fimc_clk);
  1107. clk_disable(ctx->wb_clk);
  1108. ctx->suspended = true;
  1109. }
  1110. return 0;
  1111. }
  1112. static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
  1113. {
  1114. struct fimc_context *ctx = dev_id;
  1115. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1116. struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
  1117. struct drm_exynos_ipp_event_work *event_work =
  1118. c_node->event_work;
  1119. int buf_id;
  1120. DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
  1121. fimc_clear_irq(ctx);
  1122. if (fimc_check_ovf(ctx))
  1123. return IRQ_NONE;
  1124. if (!fimc_check_frame_end(ctx))
  1125. return IRQ_NONE;
  1126. buf_id = fimc_get_buf_id(ctx);
  1127. if (buf_id < 0)
  1128. return IRQ_HANDLED;
  1129. DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
  1130. if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
  1131. DRM_ERROR("failed to dequeue.\n");
  1132. return IRQ_HANDLED;
  1133. }
  1134. event_work->ippdrv = ippdrv;
  1135. event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
  1136. queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
  1137. return IRQ_HANDLED;
  1138. }
  1139. static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
  1140. {
  1141. struct drm_exynos_ipp_prop_list *prop_list;
  1142. DRM_DEBUG_KMS("%s\n", __func__);
  1143. prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
  1144. if (!prop_list) {
  1145. DRM_ERROR("failed to alloc property list.\n");
  1146. return -ENOMEM;
  1147. }
  1148. prop_list->version = 1;
  1149. prop_list->writeback = 1;
  1150. prop_list->refresh_min = FIMC_REFRESH_MIN;
  1151. prop_list->refresh_max = FIMC_REFRESH_MAX;
  1152. prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
  1153. (1 << EXYNOS_DRM_FLIP_VERTICAL) |
  1154. (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
  1155. prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
  1156. (1 << EXYNOS_DRM_DEGREE_90) |
  1157. (1 << EXYNOS_DRM_DEGREE_180) |
  1158. (1 << EXYNOS_DRM_DEGREE_270);
  1159. prop_list->csc = 1;
  1160. prop_list->crop = 1;
  1161. prop_list->crop_max.hsize = FIMC_CROP_MAX;
  1162. prop_list->crop_max.vsize = FIMC_CROP_MAX;
  1163. prop_list->crop_min.hsize = FIMC_CROP_MIN;
  1164. prop_list->crop_min.vsize = FIMC_CROP_MIN;
  1165. prop_list->scale = 1;
  1166. prop_list->scale_max.hsize = FIMC_SCALE_MAX;
  1167. prop_list->scale_max.vsize = FIMC_SCALE_MAX;
  1168. prop_list->scale_min.hsize = FIMC_SCALE_MIN;
  1169. prop_list->scale_min.vsize = FIMC_SCALE_MIN;
  1170. ippdrv->prop_list = prop_list;
  1171. return 0;
  1172. }
  1173. static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
  1174. {
  1175. switch (flip) {
  1176. case EXYNOS_DRM_FLIP_NONE:
  1177. case EXYNOS_DRM_FLIP_VERTICAL:
  1178. case EXYNOS_DRM_FLIP_HORIZONTAL:
  1179. case EXYNOS_DRM_FLIP_BOTH:
  1180. return true;
  1181. default:
  1182. DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
  1183. return false;
  1184. }
  1185. }
  1186. static int fimc_ippdrv_check_property(struct device *dev,
  1187. struct drm_exynos_ipp_property *property)
  1188. {
  1189. struct fimc_context *ctx = get_fimc_context(dev);
  1190. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1191. struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
  1192. struct drm_exynos_ipp_config *config;
  1193. struct drm_exynos_pos *pos;
  1194. struct drm_exynos_sz *sz;
  1195. bool swap;
  1196. int i;
  1197. DRM_DEBUG_KMS("%s\n", __func__);
  1198. for_each_ipp_ops(i) {
  1199. if ((i == EXYNOS_DRM_OPS_SRC) &&
  1200. (property->cmd == IPP_CMD_WB))
  1201. continue;
  1202. config = &property->config[i];
  1203. pos = &config->pos;
  1204. sz = &config->sz;
  1205. /* check for flip */
  1206. if (!fimc_check_drm_flip(config->flip)) {
  1207. DRM_ERROR("invalid flip.\n");
  1208. goto err_property;
  1209. }
  1210. /* check for degree */
  1211. switch (config->degree) {
  1212. case EXYNOS_DRM_DEGREE_90:
  1213. case EXYNOS_DRM_DEGREE_270:
  1214. swap = true;
  1215. break;
  1216. case EXYNOS_DRM_DEGREE_0:
  1217. case EXYNOS_DRM_DEGREE_180:
  1218. swap = false;
  1219. break;
  1220. default:
  1221. DRM_ERROR("invalid degree.\n");
  1222. goto err_property;
  1223. }
  1224. /* check for buffer bound */
  1225. if ((pos->x + pos->w > sz->hsize) ||
  1226. (pos->y + pos->h > sz->vsize)) {
  1227. DRM_ERROR("out of buf bound.\n");
  1228. goto err_property;
  1229. }
  1230. /* check for crop */
  1231. if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
  1232. if (swap) {
  1233. if ((pos->h < pp->crop_min.hsize) ||
  1234. (sz->vsize > pp->crop_max.hsize) ||
  1235. (pos->w < pp->crop_min.vsize) ||
  1236. (sz->hsize > pp->crop_max.vsize)) {
  1237. DRM_ERROR("out of crop size.\n");
  1238. goto err_property;
  1239. }
  1240. } else {
  1241. if ((pos->w < pp->crop_min.hsize) ||
  1242. (sz->hsize > pp->crop_max.hsize) ||
  1243. (pos->h < pp->crop_min.vsize) ||
  1244. (sz->vsize > pp->crop_max.vsize)) {
  1245. DRM_ERROR("out of crop size.\n");
  1246. goto err_property;
  1247. }
  1248. }
  1249. }
  1250. /* check for scale */
  1251. if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
  1252. if (swap) {
  1253. if ((pos->h < pp->scale_min.hsize) ||
  1254. (sz->vsize > pp->scale_max.hsize) ||
  1255. (pos->w < pp->scale_min.vsize) ||
  1256. (sz->hsize > pp->scale_max.vsize)) {
  1257. DRM_ERROR("out of scale size.\n");
  1258. goto err_property;
  1259. }
  1260. } else {
  1261. if ((pos->w < pp->scale_min.hsize) ||
  1262. (sz->hsize > pp->scale_max.hsize) ||
  1263. (pos->h < pp->scale_min.vsize) ||
  1264. (sz->vsize > pp->scale_max.vsize)) {
  1265. DRM_ERROR("out of scale size.\n");
  1266. goto err_property;
  1267. }
  1268. }
  1269. }
  1270. }
  1271. return 0;
  1272. err_property:
  1273. for_each_ipp_ops(i) {
  1274. if ((i == EXYNOS_DRM_OPS_SRC) &&
  1275. (property->cmd == IPP_CMD_WB))
  1276. continue;
  1277. config = &property->config[i];
  1278. pos = &config->pos;
  1279. sz = &config->sz;
  1280. DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
  1281. i ? "dst" : "src", config->flip, config->degree,
  1282. pos->x, pos->y, pos->w, pos->h,
  1283. sz->hsize, sz->vsize);
  1284. }
  1285. return -EINVAL;
  1286. }
  1287. static void fimc_clear_addr(struct fimc_context *ctx)
  1288. {
  1289. int i;
  1290. DRM_DEBUG_KMS("%s:\n", __func__);
  1291. for (i = 0; i < FIMC_MAX_SRC; i++) {
  1292. fimc_write(0, EXYNOS_CIIYSA(i));
  1293. fimc_write(0, EXYNOS_CIICBSA(i));
  1294. fimc_write(0, EXYNOS_CIICRSA(i));
  1295. }
  1296. for (i = 0; i < FIMC_MAX_DST; i++) {
  1297. fimc_write(0, EXYNOS_CIOYSA(i));
  1298. fimc_write(0, EXYNOS_CIOCBSA(i));
  1299. fimc_write(0, EXYNOS_CIOCRSA(i));
  1300. }
  1301. }
  1302. static int fimc_ippdrv_reset(struct device *dev)
  1303. {
  1304. struct fimc_context *ctx = get_fimc_context(dev);
  1305. DRM_DEBUG_KMS("%s\n", __func__);
  1306. /* reset h/w block */
  1307. fimc_sw_reset(ctx);
  1308. /* reset scaler capability */
  1309. memset(&ctx->sc, 0x0, sizeof(ctx->sc));
  1310. fimc_clear_addr(ctx);
  1311. return 0;
  1312. }
  1313. static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
  1314. {
  1315. struct fimc_context *ctx = get_fimc_context(dev);
  1316. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1317. struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
  1318. struct drm_exynos_ipp_property *property;
  1319. struct drm_exynos_ipp_config *config;
  1320. struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
  1321. struct drm_exynos_ipp_set_wb set_wb;
  1322. int ret, i;
  1323. u32 cfg0, cfg1;
  1324. DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
  1325. if (!c_node) {
  1326. DRM_ERROR("failed to get c_node.\n");
  1327. return -EINVAL;
  1328. }
  1329. property = &c_node->property;
  1330. fimc_handle_irq(ctx, true, false, true);
  1331. for_each_ipp_ops(i) {
  1332. config = &property->config[i];
  1333. img_pos[i] = config->pos;
  1334. }
  1335. ret = fimc_set_prescaler(ctx, &ctx->sc,
  1336. &img_pos[EXYNOS_DRM_OPS_SRC],
  1337. &img_pos[EXYNOS_DRM_OPS_DST]);
  1338. if (ret) {
  1339. dev_err(dev, "failed to set precalser.\n");
  1340. return ret;
  1341. }
  1342. /* If set ture, we can save jpeg about screen */
  1343. fimc_handle_jpeg(ctx, false);
  1344. fimc_set_scaler(ctx, &ctx->sc);
  1345. fimc_set_polarity(ctx, &ctx->pol);
  1346. switch (cmd) {
  1347. case IPP_CMD_M2M:
  1348. fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
  1349. fimc_handle_lastend(ctx, false);
  1350. /* setup dma */
  1351. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1352. cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
  1353. cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
  1354. fimc_write(cfg0, EXYNOS_MSCTRL);
  1355. break;
  1356. case IPP_CMD_WB:
  1357. fimc_set_type_ctrl(ctx, FIMC_WB_A);
  1358. fimc_handle_lastend(ctx, true);
  1359. /* setup FIMD */
  1360. fimc_set_camblk_fimd0_wb(ctx);
  1361. set_wb.enable = 1;
  1362. set_wb.refresh = property->refresh_rate;
  1363. exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
  1364. break;
  1365. case IPP_CMD_OUTPUT:
  1366. default:
  1367. ret = -EINVAL;
  1368. dev_err(dev, "invalid operations.\n");
  1369. return ret;
  1370. }
  1371. /* Reset status */
  1372. fimc_write(0x0, EXYNOS_CISTATUS);
  1373. cfg0 = fimc_read(EXYNOS_CIIMGCPT);
  1374. cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
  1375. cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
  1376. /* Scaler */
  1377. cfg1 = fimc_read(EXYNOS_CISCCTRL);
  1378. cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
  1379. cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
  1380. EXYNOS_CISCCTRL_SCALERSTART);
  1381. fimc_write(cfg1, EXYNOS_CISCCTRL);
  1382. /* Enable image capture*/
  1383. cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
  1384. fimc_write(cfg0, EXYNOS_CIIMGCPT);
  1385. /* Disable frame end irq */
  1386. cfg0 = fimc_read(EXYNOS_CIGCTRL);
  1387. cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
  1388. fimc_write(cfg0, EXYNOS_CIGCTRL);
  1389. cfg0 = fimc_read(EXYNOS_CIOCTRL);
  1390. cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
  1391. fimc_write(cfg0, EXYNOS_CIOCTRL);
  1392. if (cmd == IPP_CMD_M2M) {
  1393. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1394. cfg0 |= EXYNOS_MSCTRL_ENVID;
  1395. fimc_write(cfg0, EXYNOS_MSCTRL);
  1396. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1397. cfg0 |= EXYNOS_MSCTRL_ENVID;
  1398. fimc_write(cfg0, EXYNOS_MSCTRL);
  1399. }
  1400. return 0;
  1401. }
static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* Source clear */
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operation.\n");
		break;
	}

	fimc_handle_irq(ctx, false, false, true);

	/* reset sequence */
	fimc_write(0x0, EXYNOS_CIFCNTSEQ);

	/* Scaler disable */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
	fimc_write(cfg, EXYNOS_CISCCTRL);

	/* Disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* Disable frame end irq */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}
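
/*
 * fimc_probe - bind the FIMC IPP driver to a platform device.
 *
 * Looks up the platform data and per-SoC driver data, acquires and
 * configures the FIMC clocks (reparenting sclk_fimc and setting its rate
 * from the platform data), maps the register bank, requests the threaded
 * irq, fills in the exynos_drm_ippdrv callbacks and finally registers
 * the driver with the Exynos DRM IPP core.
 */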
static int fimc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx;
	struct clk *parent_clk;
	struct resource *res;
	struct exynos_drm_ippdrv *ippdrv;
	struct exynos_drm_fimc_pdata *pdata;
	struct fimc_driverdata *ddata;
	int ret;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(dev, "no platform data specified.\n");
		return -EINVAL;
	}

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ddata = (struct fimc_driverdata *)
		platform_get_device_id(pdev)->driver_data;

	/* clock control */
	ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
	if (IS_ERR(ctx->sclk_fimc_clk)) {
		dev_err(dev, "failed to get src fimc clock.\n");
		return PTR_ERR(ctx->sclk_fimc_clk);
	}
	clk_enable(ctx->sclk_fimc_clk);

	ctx->fimc_clk = devm_clk_get(dev, "fimc");
	if (IS_ERR(ctx->fimc_clk)) {
		dev_err(dev, "failed to get fimc clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->fimc_clk);
	}

	ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
	if (IS_ERR(ctx->wb_clk)) {
		dev_err(dev, "failed to get writeback a clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_clk);
	}

	ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
	if (IS_ERR(ctx->wb_b_clk)) {
		dev_err(dev, "failed to get writeback b clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_b_clk);
	}

	parent_clk = devm_clk_get(dev, ddata->parent_clk);
	if (IS_ERR(parent_clk)) {
		dev_err(dev, "failed to get parent clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(parent_clk);
	}

	if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
		dev_err(dev, "failed to set parent.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return -EINVAL;
	}
	devm_clk_put(dev, parent_clk);
	clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);

	/* resource memory */
	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
	if (!ctx->regs) {
		dev_err(dev, "failed to map registers.\n");
		return -ENXIO;
	}

	/* resource irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to request irq resource.\n");
		return -ENOENT;
	}

	ctx->irq = res->start;
	ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
		IRQF_ONESHOT, "drm_fimc", ctx);
	if (ret < 0) {
		dev_err(dev, "failed to request irq.\n");
		return ret;
	}

	/* context initialization */
	ctx->id = pdev->id;
	ctx->pol = pdata->pol;
	ctx->ddata = ddata;

	ippdrv = &ctx->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
	ippdrv->check_property = fimc_ippdrv_check_property;
	ippdrv->reset = fimc_ippdrv_reset;
	ippdrv->start = fimc_ippdrv_start;
	ippdrv->stop = fimc_ippdrv_stop;
	ret = fimc_init_prop_list(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to init property list.\n");
		goto err_get_irq;
	}

	DRM_DEBUG_KMS("%s:id[%d]ippdrv[%p]\n", __func__, ctx->id, ippdrv);

	mutex_init(&ctx->lock);
	platform_set_drvdata(pdev, ctx);

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm fimc device.\n");
		goto err_ippdrv_register;
	}

	dev_info(&pdev->dev, "drm fimc registered successfully.\n");

	return 0;

err_ippdrv_register:
	devm_kfree(dev, ippdrv->prop_list);
	pm_runtime_disable(dev);
err_get_irq:
	free_irq(ctx->irq, ctx);

	return ret;
}
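
/*
 * fimc_remove - undo fimc_probe() in reverse order: drop the property
 * list, unregister from the IPP core, destroy the context lock, disable
 * runtime PM and release the irq.
 */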
static int fimc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;

	devm_kfree(dev, ippdrv->prop_list);
	exynos_drm_ippdrv_unregister(ippdrv);
	mutex_destroy(&ctx->lock);

	pm_runtime_set_suspended(dev);
	pm_runtime_disable(dev);

	free_irq(ctx->irq, ctx);

	return 0;
}
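
/*
 * System sleep PM callbacks.  fimc_clk_ctrl() is invoked only when the
 * power state actually changes: fimc_suspend() is a no-op for a device
 * that is already runtime suspended, and fimc_resume() does nothing if
 * the device was runtime suspended before the system went down.
 */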
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	if (pm_runtime_suspended(dev))
		return 0;

	return fimc_clk_ctrl(ctx, false);
}

static int fimc_resume(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	if (!pm_runtime_suspended(dev))
		return fimc_clk_ctrl(ctx, true);

	return 0;
}
#endif
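
/*
 * Runtime PM callbacks: both simply forward the requested state to
 * fimc_clk_ctrl(), which handles the FIMC clock gating.
 */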
#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_suspend(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	return fimc_clk_ctrl(ctx, false);
}

static int fimc_runtime_resume(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	return fimc_clk_ctrl(ctx, true);
}
#endif
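
/*
 * Per-SoC driver data: the only difference handled here is the name of
 * the clock that sclk_fimc is reparented to in fimc_probe() -
 * "mout_mpll" on Exynos4210 and "mout_mpll_user" on Exynos4412.
 */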
static struct fimc_driverdata exynos4210_fimc_data = {
	.parent_clk = "mout_mpll",
};

static struct fimc_driverdata exynos4410_fimc_data = {
	.parent_clk = "mout_mpll_user",
};

static struct platform_device_id fimc_driver_ids[] = {
	{
		.name = "exynos4210-fimc",
		.driver_data = (unsigned long)&exynos4210_fimc_data,
	}, {
		.name = "exynos4412-fimc",
		.driver_data = (unsigned long)&exynos4410_fimc_data,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, fimc_driver_ids);

static const struct dev_pm_ops fimc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};

struct platform_driver fimc_driver = {
	.probe = fimc_probe,
	.remove = fimc_remove,
	.id_table = fimc_driver_ids,
	.driver = {
		.name = "exynos-drm-fimc",
		.owner = THIS_MODULE,
		.pm = &fimc_pm_ops,
	},
};
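
/*
 * Note: fimc_driver is deliberately non-static and there is no
 * module_platform_driver() in this file, which suggests registration is
 * done elsewhere in the Exynos DRM core.  A minimal sketch of what such
 * a registration could look like (the exynos_drm_init()/exynos_drm_exit()
 * names below are only illustrative):
 *
 *	extern struct platform_driver fimc_driver;
 *
 *	static int __init exynos_drm_init(void)
 *	{
 *		return platform_driver_register(&fimc_driver);
 *	}
 *
 *	static void __exit exynos_drm_exit(void)
 *	{
 *		platform_driver_unregister(&fimc_driver);
 *	}
 */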