exynos_drm_fimc.c

/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_fimc.h"

/*
 * FIMC stands for Fully Interactive Mobile Camera and
 * supports image scaler/rotator and input/output DMA operations.
 * Input DMA reads image data from memory.
 * Output DMA writes image data to memory.
 * FIMC supports image rotation and image effect functions.
 *
 * M2M operation : supports crop/scale/rotation/csc and so on.
 * Memory ----> FIMC H/W ----> Memory.
 * Writeback operation : supports cloned screen with FIMD.
 * FIMD ----> FIMC H/W ----> Memory.
 * Output operation : supports direct display using local path.
 * Memory ----> FIMC H/W ----> FIMD.
 */

/*
 * TODO
 * 1. check suspend/resume api if needed.
 * 2. need to check use case platform_device_id.
 * 3. check src/dst size (width, height).
 * 4. add check_prepare api for correct register setting.
 * 5. need to add supported list in prop_list.
 * 6. check prescaler/scaler optimization.
 */
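/*
 * Note on selected constants below (derived from their usage in this file):
 * FIMC_SHFACTOR is the base from which the horizontal and vertical
 * pre-scaler shifts are subtracted to form the SHFACTOR register value,
 * FIMC_BUF_START/FIMC_BUF_STOP are thresholds on the number of enqueued
 * destination buffers that gate the frame-end interrupt, and FIMC_REG_SZ
 * is the number of buffer slots tracked by the CIFCNTSEQ register.
 */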
#define FIMC_MAX_DEVS	4
#define FIMC_MAX_SRC	2
#define FIMC_MAX_DST	32
#define FIMC_SHFACTOR	10
#define FIMC_BUF_STOP	1
#define FIMC_BUF_START	2
#define FIMC_REG_SZ	32
#define FIMC_WIDTH_ITU_709	1280
#define FIMC_REFRESH_MAX	60
#define FIMC_REFRESH_MIN	12
#define FIMC_CROP_MAX	8192
#define FIMC_CROP_MIN	32
#define FIMC_SCALE_MAX	4224
#define FIMC_SCALE_MIN	32

#define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
		struct fimc_context, ippdrv);
#define fimc_read(offset)	readl(ctx->regs + (offset))
#define fimc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
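/*
 * fimc_read()/fimc_write() deliberately reference a local variable named
 * 'ctx' (struct fimc_context *), so they can only be used inside functions
 * that declare one.
 */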
enum fimc_wb {
	FIMC_WB_NONE,
	FIMC_WB_A,
	FIMC_WB_B,
};

/*
 * A structure of scaler.
 *
 * @range: narrow, wide.
 * @bypass: unused scaler path.
 * @up_h: horizontal scale up.
 * @up_v: vertical scale up.
 * @hratio: horizontal ratio.
 * @vratio: vertical ratio.
 */
struct fimc_scaler {
	bool range;
	bool bypass;
	bool up_h;
	bool up_v;
	u32 hratio;
	u32 vratio;
};

/*
 * A structure of scaler capability.
 *
 * refer to the user manual, table 43-1.
 * @in_hori: scaler input horizontal size.
 * @bypass: scaler bypass mode.
 * @dst_h_wo_rot: target horizontal size without output rotation.
 * @dst_h_rot: target horizontal size with output rotation.
 * @rl_w_wo_rot: real width without input rotation.
 * @rl_h_rot: real height without output rotation.
 */
struct fimc_capability {
	/* scaler */
	u32 in_hori;
	u32 bypass;
	/* output rotator */
	u32 dst_h_wo_rot;
	u32 dst_h_rot;
	/* input rotator */
	u32 rl_w_wo_rot;
	u32 rl_h_rot;
};

/*
 * A structure of fimc driver data.
 *
 * @parent_clk: name of parent clock.
 */
struct fimc_driverdata {
	char *parent_clk;
};

/*
 * A structure of fimc context.
 *
 * @ippdrv: prepare initialization using ippdrv.
 * @regs_res: register resources.
 * @regs: memory mapped io registers.
 * @lock: locking of operations.
 * @sclk_fimc_clk: fimc source clock.
 * @fimc_clk: fimc clock.
 * @wb_clk: writeback a clock.
 * @wb_b_clk: writeback b clock.
 * @sc: scaler information.
 * @ddata: fimc driver data.
 * @pol: polarity of writeback.
 * @id: fimc id.
 * @irq: irq number.
 * @suspended: suspended state.
 */
struct fimc_context {
	struct exynos_drm_ippdrv ippdrv;
	struct resource *regs_res;
	void __iomem *regs;
	struct mutex lock;
	struct clk *sclk_fimc_clk;
	struct clk *fimc_clk;
	struct clk *wb_clk;
	struct clk *wb_b_clk;
	struct fimc_scaler sc;
	struct fimc_driverdata *ddata;
	struct exynos_drm_ipp_pol pol;
	int id;
	int irq;
	bool suspended;
};

static void fimc_sw_reset(struct fimc_context *ctx)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* stop dma operation */
	cfg = fimc_read(EXYNOS_CISTATUS);
	if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
	}

	cfg = fimc_read(EXYNOS_CISRCFMT);
	cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
	fimc_write(cfg, EXYNOS_CISRCFMT);

	/* disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* s/w reset */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= (EXYNOS_CIGCTRL_SWRST);
	fimc_write(cfg, EXYNOS_CIGCTRL);

	/* s/w reset complete */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~EXYNOS_CIGCTRL_SWRST;
	fimc_write(cfg, EXYNOS_CIGCTRL);

	/* reset sequence */
	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
}

static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
{
	u32 camblk_cfg;

	DRM_DEBUG_KMS("%s\n", __func__);

	camblk_cfg = readl(SYSREG_CAMERA_BLK);
	camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
	camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);

	writel(camblk_cfg, SYSREG_CAMERA_BLK);
}

static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);

	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
		EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
		EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
		EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
		EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
		EXYNOS_CIGCTRL_SELWRITEBACK_MASK);

	switch (wb) {
	case FIMC_WB_A:
		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
		break;
	case FIMC_WB_B:
		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
		break;
	case FIMC_WB_NONE:
	default:
		cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
			EXYNOS_CIGCTRL_SELWRITEBACK_A |
			EXYNOS_CIGCTRL_SELCAM_MIPI_A |
			EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
		break;
	}

	fimc_write(cfg, EXYNOS_CIGCTRL);
}

static void fimc_set_polarity(struct fimc_context *ctx,
		struct exynos_drm_ipp_pol *pol)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
		__func__, pol->inv_pclk, pol->inv_vsync);
	DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
		__func__, pol->inv_href, pol->inv_hsync);

	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
		EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);

	if (pol->inv_pclk)
		cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
	if (pol->inv_vsync)
		cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
	if (pol->inv_href)
		cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
	if (pol->inv_hsync)
		cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;

	fimc_write(cfg, EXYNOS_CIGCTRL);
}

static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	cfg = fimc_read(EXYNOS_CIGCTRL);
	if (enable)
		cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
	else
		cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;

	fimc_write(cfg, EXYNOS_CIGCTRL);
}

static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
		bool overflow, bool level)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
		enable, overflow, level);

	cfg = fimc_read(EXYNOS_CIGCTRL);
	if (enable) {
		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
		cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
		if (overflow)
			cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
		if (level)
			cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
	} else
		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);

	fimc_write(cfg, EXYNOS_CIGCTRL);
}

static void fimc_clear_irq(struct fimc_context *ctx)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s\n", __func__);

	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}

static bool fimc_check_ovf(struct fimc_context *ctx)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg, status, flag;

	status = fimc_read(EXYNOS_CISTATUS);
	flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
		EXYNOS_CISTATUS_OVFICR;

	DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);

	if (status & flag) {
		cfg = fimc_read(EXYNOS_CIWDOFST);
		cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
			EXYNOS_CIWDOFST_CLROVFICR);
		fimc_write(cfg, EXYNOS_CIWDOFST);

		cfg = fimc_read(EXYNOS_CIWDOFST);
		cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
			EXYNOS_CIWDOFST_CLROVFICR);
		fimc_write(cfg, EXYNOS_CIWDOFST);

		dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
			ctx->id, status);
		return true;
	}

	return false;
}

static bool fimc_check_frame_end(struct fimc_context *ctx)
{
	u32 cfg;

	cfg = fimc_read(EXYNOS_CISTATUS);

	DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);

	if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
		return false;

	cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
	fimc_write(cfg, EXYNOS_CISTATUS);

	return true;
}
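/*
 * The driver treats the hardware frame counters in CISTATUS2 as 1-based:
 * the buffer index that just completed is the frame count minus one, and a
 * zero count from both the "before" and "present" fields means no frame has
 * been captured yet.
 */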
static int fimc_get_buf_id(struct fimc_context *ctx)
{
	u32 cfg;
	int frame_cnt, buf_id;

	DRM_DEBUG_KMS("%s\n", __func__);

	cfg = fimc_read(EXYNOS_CISTATUS2);
	frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);

	if (frame_cnt == 0)
		frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);

	DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
		EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
		EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));

	if (frame_cnt == 0) {
		DRM_ERROR("failed to get frame count.\n");
		return -EIO;
	}

	buf_id = frame_cnt - 1;
	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);

	return buf_id;
}

static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
{
	u32 cfg;

	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	cfg = fimc_read(EXYNOS_CIOCTRL);
	if (enable)
		cfg |= EXYNOS_CIOCTRL_LASTENDEN;
	else
		cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;

	fimc_write(cfg, EXYNOS_CIOCTRL);
}

static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg;

	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);

	/* RGB */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;

	switch (fmt) {
	case DRM_FORMAT_RGB565:
		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
		fimc_write(cfg, EXYNOS_CISCCTRL);
		return 0;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
		fimc_write(cfg, EXYNOS_CISCCTRL);
		return 0;
	default:
		/* bypass */
		break;
	}

	/* YUV */
	cfg = fimc_read(EXYNOS_MSCTRL);
	cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
		EXYNOS_MSCTRL_C_INT_IN_2PLANE |
		EXYNOS_MSCTRL_ORDER422_YCBYCR);

	switch (fmt) {
	case DRM_FORMAT_YUYV:
		cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
		break;
	case DRM_FORMAT_YVYU:
		cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
		break;
	case DRM_FORMAT_UYVY:
		cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
		break;
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_YUV444:
		cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
		break;
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV61:
		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
		break;
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
		cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
		break;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV12MT:
	case DRM_FORMAT_NV16:
		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
		break;
	default:
		dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
		return -EINVAL;
	}

	fimc_write(cfg, EXYNOS_MSCTRL);

	return 0;
}

static int fimc_src_set_fmt(struct device *dev, u32 fmt)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg;

	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);

	cfg = fimc_read(EXYNOS_MSCTRL);
	cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;

	switch (fmt) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
		break;
	case DRM_FORMAT_YUV444:
		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
		break;
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_YUV422:
		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
		break;
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV12MT:
		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
		break;
	default:
		dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
		return -EINVAL;
	}

	fimc_write(cfg, EXYNOS_MSCTRL);

	cfg = fimc_read(EXYNOS_CIDMAPARAM);
	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;

	if (fmt == DRM_FORMAT_NV12MT)
		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
	else
		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;

	fimc_write(cfg, EXYNOS_CIDMAPARAM);

	return fimc_src_set_fmt_order(ctx, fmt);
}

static int fimc_src_set_transf(struct device *dev,
		enum drm_exynos_degree degree,
		enum drm_exynos_flip flip, bool *swap)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg1, cfg2;

	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
		degree, flip);

	cfg1 = fimc_read(EXYNOS_MSCTRL);
	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
		EXYNOS_MSCTRL_FLIP_Y_MIRROR);

	cfg2 = fimc_read(EXYNOS_CITRGFMT);
	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;

	switch (degree) {
	case EXYNOS_DRM_DEGREE_0:
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_90:
		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_180:
		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_270:
		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
		break;
	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
		return -EINVAL;
	}

	fimc_write(cfg1, EXYNOS_MSCTRL);
	fimc_write(cfg2, EXYNOS_CITRGFMT);
	*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;

	return 0;
}

static int fimc_set_window(struct fimc_context *ctx,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	u32 cfg, h1, h2, v1, v2;

	/* cropped image */
	h1 = pos->x;
	h2 = sz->hsize - pos->w - pos->x;
	v1 = pos->y;
	v2 = sz->vsize - pos->h - pos->y;

	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
	DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
		h1, h2, v1, v2);

	/*
	 * set window offset 1, 2 size
	 * check figure 43-21 in user manual
	 */
	cfg = fimc_read(EXYNOS_CIWDOFST);
	cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
		EXYNOS_CIWDOFST_WINVEROFST_MASK);
	cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
		EXYNOS_CIWDOFST_WINVEROFST(v1));
	cfg |= EXYNOS_CIWDOFST_WINOFSEN;
	fimc_write(cfg, EXYNOS_CIWDOFST);

	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
		EXYNOS_CIWDOFST2_WINVEROFST2(v2));
	fimc_write(cfg, EXYNOS_CIWDOFST2);

	return 0;
}

static int fimc_src_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
		__func__, swap, sz->hsize, sz->vsize);

	/* original size */
	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
	fimc_write(cfg, EXYNOS_ORGISIZE);

	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
		pos->x, pos->y, pos->w, pos->h);

	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* set input DMA image size */
	cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
	fimc_write(cfg, EXYNOS_CIREAL_ISIZE);

	/*
	 * set input FIFO image size
	 * for now, we support only ITU601 8 bit mode
	 */
	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
	fimc_write(cfg, EXYNOS_CISRCFMT);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIIYOFF);
	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIICBOFF);
	cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIICROFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIICROFF);

	return fimc_set_window(ctx, &img_pos, &img_sz);
}

static int fimc_src_set_addr(struct device *dev,
		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_property *property;
	struct drm_exynos_ipp_config *config;

	if (!c_node) {
		DRM_ERROR("failed to get c_node.\n");
		return -EINVAL;
	}

	property = &c_node->property;

	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
		property->prop_id, buf_id, buf_type);

	if (buf_id > FIMC_MAX_SRC) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
		return -ENOMEM;
	}

	/* address register set */
	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		config = &property->config[EXYNOS_DRM_OPS_SRC];
		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
			EXYNOS_CIIYSA(buf_id));

		if (config->fmt == DRM_FORMAT_YVU420) {
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
				EXYNOS_CIICBSA(buf_id));
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
				EXYNOS_CIICRSA(buf_id));
		} else {
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
				EXYNOS_CIICBSA(buf_id));
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
				EXYNOS_CIICRSA(buf_id));
		}
		break;
	case IPP_BUF_DEQUEUE:
		fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
		fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
		fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
		break;
	default:
		/* bypass */
		break;
	}

	return 0;
}

static struct exynos_drm_ipp_ops fimc_src_ops = {
	.set_fmt = fimc_src_set_fmt,
	.set_transf = fimc_src_set_transf,
	.set_size = fimc_src_set_size,
	.set_addr = fimc_src_set_addr,
};

static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg;

	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);

	/* RGB */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;

	switch (fmt) {
	case DRM_FORMAT_RGB565:
		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
		fimc_write(cfg, EXYNOS_CISCCTRL);
		return 0;
	case DRM_FORMAT_RGB888:
		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
		fimc_write(cfg, EXYNOS_CISCCTRL);
		return 0;
	case DRM_FORMAT_XRGB8888:
		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
			EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
		fimc_write(cfg, EXYNOS_CISCCTRL);
		break;
	default:
		/* bypass */
		break;
	}

	/* YUV */
	cfg = fimc_read(EXYNOS_CIOCTRL);
	cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
		EXYNOS_CIOCTRL_ORDER422_MASK |
		EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);

	switch (fmt) {
	case DRM_FORMAT_XRGB8888:
		cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
		break;
	case DRM_FORMAT_YUYV:
		cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
		break;
	case DRM_FORMAT_YVYU:
		cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
		break;
	case DRM_FORMAT_UYVY:
		cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
		break;
	case DRM_FORMAT_VYUY:
		cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
		break;
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV61:
		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
		break;
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
		cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
		break;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV12MT:
	case DRM_FORMAT_NV16:
		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
		break;
	default:
		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
		return -EINVAL;
	}

	fimc_write(cfg, EXYNOS_CIOCTRL);

	return 0;
}

static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg;

	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);

	cfg = fimc_read(EXYNOS_CIEXTEN);

	if (fmt == DRM_FORMAT_AYUV) {
		cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
		fimc_write(cfg, EXYNOS_CIEXTEN);
	} else {
		cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
		fimc_write(cfg, EXYNOS_CIEXTEN);

		cfg = fimc_read(EXYNOS_CITRGFMT);
		cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;

		switch (fmt) {
		case DRM_FORMAT_RGB565:
		case DRM_FORMAT_RGB888:
		case DRM_FORMAT_XRGB8888:
			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
			break;
		case DRM_FORMAT_YUYV:
		case DRM_FORMAT_YVYU:
		case DRM_FORMAT_UYVY:
		case DRM_FORMAT_VYUY:
			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
			break;
		case DRM_FORMAT_NV16:
		case DRM_FORMAT_NV61:
		case DRM_FORMAT_YUV422:
			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
			break;
		case DRM_FORMAT_YUV420:
		case DRM_FORMAT_YVU420:
		case DRM_FORMAT_NV12:
		case DRM_FORMAT_NV12MT:
		case DRM_FORMAT_NV21:
			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
			break;
		default:
			dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
				fmt);
			return -EINVAL;
		}

		fimc_write(cfg, EXYNOS_CITRGFMT);
	}

	cfg = fimc_read(EXYNOS_CIDMAPARAM);
	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;

	if (fmt == DRM_FORMAT_NV12MT)
		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
	else
		cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;

	fimc_write(cfg, EXYNOS_CIDMAPARAM);

	return fimc_dst_set_fmt_order(ctx, fmt);
}

static int fimc_dst_set_transf(struct device *dev,
		enum drm_exynos_degree degree,
		enum drm_exynos_flip flip, bool *swap)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg;

	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
		degree, flip);

	cfg = fimc_read(EXYNOS_CITRGFMT);
	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;

	switch (degree) {
	case EXYNOS_DRM_DEGREE_0:
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_90:
		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_180:
		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
		break;
	case EXYNOS_DRM_DEGREE_270:
		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
			EXYNOS_CITRGFMT_FLIP_X_MIRROR |
			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
		break;
	default:
		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
		return -EINVAL;
	}

	fimc_write(cfg, EXYNOS_CITRGFMT);
	*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;

	return 0;
}

static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
{
	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);

	if (src >= dst * 64) {
		DRM_ERROR("failed to make ratio and shift.\n");
		return -EINVAL;
	} else if (src >= dst * 32) {
		*ratio = 32;
		*shift = 5;
	} else if (src >= dst * 16) {
		*ratio = 16;
		*shift = 4;
	} else if (src >= dst * 8) {
		*ratio = 8;
		*shift = 3;
	} else if (src >= dst * 4) {
		*ratio = 4;
		*shift = 2;
	} else if (src >= dst * 2) {
		*ratio = 2;
		*shift = 1;
	} else {
		*ratio = 1;
		*shift = 0;
	}

	return 0;
}
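/*
 * Scaling is done in two stages: fimc_get_ratio_shift() picks a power-of-two
 * pre-scaler ratio (the "shift" is log2 of that ratio), and the remaining
 * factor is programmed into the main scaler as the fixed-point ratio
 * (src << 14) / (dst << shift).  The upper part of that ratio goes into
 * CISCCTRL (>> 6) and the remainder via the CIEXTEN extension fields, see
 * fimc_set_scaler().  For example, scaling 1920 down to 720 gives a
 * pre-scaler ratio of 2 (shift 1) and a main ratio of
 * (1920 << 14) / (720 << 1) = 21845.
 */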
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg, cfg_ext, shfactor;
	u32 pre_dst_width, pre_dst_height;
	u32 pre_hratio, hfactor, pre_vratio, vfactor;
	int ret = 0;
	u32 src_w, src_h, dst_w, dst_h;

	cfg_ext = fimc_read(EXYNOS_CITRGFMT);
	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
		src_w = src->h;
		src_h = src->w;
	} else {
		src_w = src->w;
		src_h = src->h;
	}

	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
		dst_w = dst->h;
		dst_h = dst->w;
	} else {
		dst_w = dst->w;
		dst_h = dst->h;
	}

	ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
		return ret;
	}

	ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
		return ret;
	}

	pre_dst_width = src_w / pre_hratio;
	pre_dst_height = src_h / pre_vratio;
	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
		pre_dst_width, pre_dst_height);
	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
		__func__, pre_hratio, hfactor, pre_vratio, vfactor);

	sc->hratio = (src_w << 14) / (dst_w << hfactor);
	sc->vratio = (src_h << 14) / (dst_h << vfactor);
	sc->up_h = (dst_w >= src_w) ? true : false;
	sc->up_v = (dst_h >= src_h) ? true : false;
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
		__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);

	shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);

	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
	fimc_write(cfg, EXYNOS_CISCPRERATIO);

	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
	fimc_write(cfg, EXYNOS_CISCPREDST);

	return ret;
}

static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
	u32 cfg, cfg_ext;

	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
		__func__, sc->hratio, sc->vratio);

	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
		EXYNOS_CISCCTRL_CSCY2R_WIDE);

	if (sc->range)
		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
			EXYNOS_CISCCTRL_CSCY2R_WIDE);
	if (sc->bypass)
		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
	if (sc->up_h)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
	if (sc->up_v)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;

	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
	fimc_write(cfg, EXYNOS_CISCCTRL);

	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
}

static int fimc_dst_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
		__func__, swap, sz->hsize, sz->vsize);

	/* original size */
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
	fimc_write(cfg, EXYNOS_ORGOSIZE);

	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h);

	/* CSC ITU */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;

	if (sz->hsize >= FIMC_WIDTH_ITU_709)
		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
	else
		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;

	fimc_write(cfg, EXYNOS_CIGCTRL);

	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* target image size */
	cfg = fimc_read(EXYNOS_CITRGFMT);
	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
		EXYNOS_CITRGFMT_TARGETV_MASK);
	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
	fimc_write(cfg, EXYNOS_CITRGFMT);

	/* target area */
	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
	fimc_write(cfg, EXYNOS_CITAREA);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOYOFF);
	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCBOFF);
	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCROFF);

	return 0;
}

static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
{
	u32 cfg, i, buf_num = 0;
	u32 mask = 0x00000001;

	cfg = fimc_read(EXYNOS_CIFCNTSEQ);

	for (i = 0; i < FIMC_REG_SZ; i++)
		if (cfg & (mask << i))
			buf_num++;

	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);

	return buf_num;
}
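/*
 * Each bit of CIFCNTSEQ enables one destination buffer slot for output DMA.
 * fimc_dst_set_buf_seq() toggles the bit for the given buf_id and then uses
 * the number of enabled slots to decide when to turn the frame-end interrupt
 * on (>= FIMC_BUF_START enqueued) or off (<= FIMC_BUF_STOP remaining).
 */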
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	bool enable;
	u32 cfg;
	u32 mask = 0x00000001 << buf_id;
	int ret = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
		buf_id, buf_type);

	mutex_lock(&ctx->lock);

	/* mask register set */
	cfg = fimc_read(EXYNOS_CIFCNTSEQ);

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		enable = true;
		break;
	case IPP_BUF_DEQUEUE:
		enable = false;
		break;
	default:
		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* sequence id */
	cfg &= ~mask;
	cfg |= (enable << buf_id);
	fimc_write(cfg, EXYNOS_CIFCNTSEQ);

	/* interrupt enable */
	if (buf_type == IPP_BUF_ENQUEUE &&
	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
		fimc_handle_irq(ctx, true, false, true);

	/* interrupt disable */
	if (buf_type == IPP_BUF_DEQUEUE &&
	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
		fimc_handle_irq(ctx, false, false, true);

err_unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

static int fimc_dst_set_addr(struct device *dev,
		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_property *property;
	struct drm_exynos_ipp_config *config;

	if (!c_node) {
		DRM_ERROR("failed to get c_node.\n");
		return -EINVAL;
	}

	property = &c_node->property;

	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
		property->prop_id, buf_id, buf_type);

	if (buf_id > FIMC_MAX_DST) {
		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
		return -ENOMEM;
	}

	/* address register set */
	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		config = &property->config[EXYNOS_DRM_OPS_DST];

		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
			EXYNOS_CIOYSA(buf_id));

		if (config->fmt == DRM_FORMAT_YVU420) {
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
				EXYNOS_CIOCBSA(buf_id));
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
				EXYNOS_CIOCRSA(buf_id));
		} else {
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
				EXYNOS_CIOCBSA(buf_id));
			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
				EXYNOS_CIOCRSA(buf_id));
		}
		break;
	case IPP_BUF_DEQUEUE:
		fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
		fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
		fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
		break;
	default:
		/* bypass */
		break;
	}

	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
}

static struct exynos_drm_ipp_ops fimc_dst_ops = {
	.set_fmt = fimc_dst_set_fmt,
	.set_transf = fimc_dst_set_transf,
	.set_size = fimc_dst_set_size,
	.set_addr = fimc_dst_set_addr,
};

static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	if (enable) {
		clk_enable(ctx->sclk_fimc_clk);
		clk_enable(ctx->fimc_clk);
		clk_enable(ctx->wb_clk);
		ctx->suspended = false;
	} else {
		clk_disable(ctx->sclk_fimc_clk);
		clk_disable(ctx->fimc_clk);
		clk_disable(ctx->wb_clk);
		ctx->suspended = true;
	}

	return 0;
}

static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
	struct fimc_context *ctx = dev_id;
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_event_work *event_work =
		c_node->event_work;
	int buf_id;

	DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);

	fimc_clear_irq(ctx);
	if (fimc_check_ovf(ctx))
		return IRQ_NONE;

	if (!fimc_check_frame_end(ctx))
		return IRQ_NONE;

	buf_id = fimc_get_buf_id(ctx);
	if (buf_id < 0)
		return IRQ_HANDLED;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);

	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
		DRM_ERROR("failed to dequeue.\n");
		return IRQ_HANDLED;
	}

	event_work->ippdrv = ippdrv;
	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
	queue_work(ippdrv->event_workq, (struct work_struct *)event_work);

	return IRQ_HANDLED;
}

static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
	struct drm_exynos_ipp_prop_list *prop_list;

	DRM_DEBUG_KMS("%s\n", __func__);

	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
	if (!prop_list) {
		DRM_ERROR("failed to alloc property list.\n");
		return -ENOMEM;
	}

	prop_list->version = 1;
	prop_list->writeback = 1;
	prop_list->refresh_min = FIMC_REFRESH_MIN;
	prop_list->refresh_max = FIMC_REFRESH_MAX;
	prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
				(1 << EXYNOS_DRM_FLIP_VERTICAL) |
				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
				(1 << EXYNOS_DRM_DEGREE_90) |
				(1 << EXYNOS_DRM_DEGREE_180) |
				(1 << EXYNOS_DRM_DEGREE_270);
	prop_list->csc = 1;
	prop_list->crop = 1;
	prop_list->crop_max.hsize = FIMC_CROP_MAX;
	prop_list->crop_max.vsize = FIMC_CROP_MAX;
	prop_list->crop_min.hsize = FIMC_CROP_MIN;
	prop_list->crop_min.vsize = FIMC_CROP_MIN;
	prop_list->scale = 1;
	prop_list->scale_max.hsize = FIMC_SCALE_MAX;
	prop_list->scale_max.vsize = FIMC_SCALE_MAX;
	prop_list->scale_min.hsize = FIMC_SCALE_MIN;
	prop_list->scale_min.vsize = FIMC_SCALE_MIN;

	ippdrv->prop_list = prop_list;

	return 0;
}

static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
{
	switch (flip) {
	case EXYNOS_DRM_FLIP_NONE:
	case EXYNOS_DRM_FLIP_VERTICAL:
	case EXYNOS_DRM_FLIP_HORIZONTAL:
	case EXYNOS_DRM_FLIP_BOTH:
		return true;
	default:
		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
		return false;
	}
}
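/*
 * When the requested rotation is 90 or 270 degrees, width and height are
 * swapped by the hardware, so the crop and scale limits below are checked
 * against the swapped dimensions.
 */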
  1186. static int fimc_ippdrv_check_property(struct device *dev,
  1187. struct drm_exynos_ipp_property *property)
  1188. {
  1189. struct fimc_context *ctx = get_fimc_context(dev);
  1190. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1191. struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
  1192. struct drm_exynos_ipp_config *config;
  1193. struct drm_exynos_pos *pos;
  1194. struct drm_exynos_sz *sz;
  1195. bool swap;
  1196. int i;
  1197. DRM_DEBUG_KMS("%s\n", __func__);
  1198. for_each_ipp_ops(i) {
  1199. if ((i == EXYNOS_DRM_OPS_SRC) &&
  1200. (property->cmd == IPP_CMD_WB))
  1201. continue;
  1202. config = &property->config[i];
  1203. pos = &config->pos;
  1204. sz = &config->sz;
  1205. /* check for flip */
  1206. if (!fimc_check_drm_flip(config->flip)) {
  1207. DRM_ERROR("invalid flip.\n");
  1208. goto err_property;
  1209. }
  1210. /* check for degree */
  1211. switch (config->degree) {
  1212. case EXYNOS_DRM_DEGREE_90:
  1213. case EXYNOS_DRM_DEGREE_270:
  1214. swap = true;
  1215. break;
  1216. case EXYNOS_DRM_DEGREE_0:
  1217. case EXYNOS_DRM_DEGREE_180:
  1218. swap = false;
  1219. break;
  1220. default:
  1221. DRM_ERROR("invalid degree.\n");
  1222. goto err_property;
  1223. }
  1224. /* check for buffer bound */
  1225. if ((pos->x + pos->w > sz->hsize) ||
  1226. (pos->y + pos->h > sz->vsize)) {
  1227. DRM_ERROR("out of buf bound.\n");
  1228. goto err_property;
  1229. }
  1230. /* check for crop */
  1231. if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
  1232. if (swap) {
  1233. if ((pos->h < pp->crop_min.hsize) ||
  1234. (sz->vsize > pp->crop_max.hsize) ||
  1235. (pos->w < pp->crop_min.vsize) ||
  1236. (sz->hsize > pp->crop_max.vsize)) {
  1237. DRM_ERROR("out of crop size.\n");
  1238. goto err_property;
  1239. }
  1240. } else {
  1241. if ((pos->w < pp->crop_min.hsize) ||
  1242. (sz->hsize > pp->crop_max.hsize) ||
  1243. (pos->h < pp->crop_min.vsize) ||
  1244. (sz->vsize > pp->crop_max.vsize)) {
  1245. DRM_ERROR("out of crop size.\n");
  1246. goto err_property;
  1247. }
  1248. }
  1249. }
  1250. /* check for scale */
  1251. if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
  1252. if (swap) {
  1253. if ((pos->h < pp->scale_min.hsize) ||
  1254. (sz->vsize > pp->scale_max.hsize) ||
  1255. (pos->w < pp->scale_min.vsize) ||
  1256. (sz->hsize > pp->scale_max.vsize)) {
  1257. DRM_ERROR("out of scale size.\n");
  1258. goto err_property;
  1259. }
  1260. } else {
  1261. if ((pos->w < pp->scale_min.hsize) ||
  1262. (sz->hsize > pp->scale_max.hsize) ||
  1263. (pos->h < pp->scale_min.vsize) ||
  1264. (sz->vsize > pp->scale_max.vsize)) {
  1265. DRM_ERROR("out of scale size.\n");
  1266. goto err_property;
  1267. }
  1268. }
  1269. }
  1270. }
  1271. return 0;
  1272. err_property:
  1273. for_each_ipp_ops(i) {
  1274. if ((i == EXYNOS_DRM_OPS_SRC) &&
  1275. (property->cmd == IPP_CMD_WB))
  1276. continue;
  1277. config = &property->config[i];
  1278. pos = &config->pos;
  1279. sz = &config->sz;
  1280. DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
  1281. i ? "dst" : "src", config->flip, config->degree,
  1282. pos->x, pos->y, pos->w, pos->h,
  1283. sz->hsize, sz->vsize);
  1284. }
  1285. return -EINVAL;
  1286. }
  1287. static void fimc_clear_addr(struct fimc_context *ctx)
  1288. {
  1289. int i;
  1290. DRM_DEBUG_KMS("%s:\n", __func__);
  1291. for (i = 0; i < FIMC_MAX_SRC; i++) {
  1292. fimc_write(0, EXYNOS_CIIYSA(i));
  1293. fimc_write(0, EXYNOS_CIICBSA(i));
  1294. fimc_write(0, EXYNOS_CIICRSA(i));
  1295. }
  1296. for (i = 0; i < FIMC_MAX_DST; i++) {
  1297. fimc_write(0, EXYNOS_CIOYSA(i));
  1298. fimc_write(0, EXYNOS_CIOCBSA(i));
  1299. fimc_write(0, EXYNOS_CIOCRSA(i));
  1300. }
  1301. }
  1302. static int fimc_ippdrv_reset(struct device *dev)
  1303. {
  1304. struct fimc_context *ctx = get_fimc_context(dev);
  1305. DRM_DEBUG_KMS("%s\n", __func__);
  1306. /* reset h/w block */
  1307. fimc_sw_reset(ctx);
  1308. /* reset scaler capability */
  1309. memset(&ctx->sc, 0x0, sizeof(ctx->sc));
  1310. fimc_clear_addr(ctx);
  1311. return 0;
  1312. }
  1313. static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
  1314. {
  1315. struct fimc_context *ctx = get_fimc_context(dev);
  1316. struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
  1317. struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
  1318. struct drm_exynos_ipp_property *property;
  1319. struct drm_exynos_ipp_config *config;
  1320. struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
  1321. struct drm_exynos_ipp_set_wb set_wb;
  1322. int ret, i;
  1323. u32 cfg0, cfg1;
  1324. DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
  1325. if (!c_node) {
  1326. DRM_ERROR("failed to get c_node.\n");
  1327. return -EINVAL;
  1328. }
  1329. property = &c_node->property;
  1330. fimc_handle_irq(ctx, true, false, true);
  1331. for_each_ipp_ops(i) {
  1332. config = &property->config[i];
  1333. img_pos[i] = config->pos;
  1334. }
  1335. ret = fimc_set_prescaler(ctx, &ctx->sc,
  1336. &img_pos[EXYNOS_DRM_OPS_SRC],
  1337. &img_pos[EXYNOS_DRM_OPS_DST]);
  1338. if (ret) {
  1339. dev_err(dev, "failed to set precalser.\n");
  1340. return ret;
  1341. }
  1342. /* If set ture, we can save jpeg about screen */
  1343. fimc_handle_jpeg(ctx, false);
  1344. fimc_set_scaler(ctx, &ctx->sc);
  1345. fimc_set_polarity(ctx, &ctx->pol);
  1346. switch (cmd) {
  1347. case IPP_CMD_M2M:
  1348. fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
  1349. fimc_handle_lastend(ctx, false);
  1350. /* setup dma */
  1351. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1352. cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
  1353. cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
  1354. fimc_write(cfg0, EXYNOS_MSCTRL);
  1355. break;
  1356. case IPP_CMD_WB:
  1357. fimc_set_type_ctrl(ctx, FIMC_WB_A);
  1358. fimc_handle_lastend(ctx, true);
  1359. /* setup FIMD */
  1360. fimc_set_camblk_fimd0_wb(ctx);
  1361. set_wb.enable = 1;
  1362. set_wb.refresh = property->refresh_rate;
  1363. exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
  1364. break;
  1365. case IPP_CMD_OUTPUT:
  1366. default:
  1367. ret = -EINVAL;
  1368. dev_err(dev, "invalid operations.\n");
  1369. return ret;
  1370. }
  1371. /* Reset status */
  1372. fimc_write(0x0, EXYNOS_CISTATUS);
  1373. cfg0 = fimc_read(EXYNOS_CIIMGCPT);
  1374. cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
  1375. cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
  1376. /* Scaler */
  1377. cfg1 = fimc_read(EXYNOS_CISCCTRL);
  1378. cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
  1379. cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
  1380. EXYNOS_CISCCTRL_SCALERSTART);
  1381. fimc_write(cfg1, EXYNOS_CISCCTRL);
  1382. /* Enable image capture*/
  1383. cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
  1384. fimc_write(cfg0, EXYNOS_CIIMGCPT);
  1385. /* Disable frame end irq */
  1386. cfg0 = fimc_read(EXYNOS_CIGCTRL);
  1387. cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
  1388. fimc_write(cfg0, EXYNOS_CIGCTRL);
  1389. cfg0 = fimc_read(EXYNOS_CIOCTRL);
  1390. cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
  1391. fimc_write(cfg0, EXYNOS_CIOCTRL);
  1392. if (cmd == IPP_CMD_M2M) {
  1393. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1394. cfg0 |= EXYNOS_MSCTRL_ENVID;
  1395. fimc_write(cfg0, EXYNOS_MSCTRL);
  1396. cfg0 = fimc_read(EXYNOS_MSCTRL);
  1397. cfg0 |= EXYNOS_MSCTRL_ENVID;
  1398. fimc_write(cfg0, EXYNOS_MSCTRL);
  1399. }
  1400. return 0;
  1401. }
static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* Source clear */
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operation.\n");
		break;
	}

	fimc_handle_irq(ctx, false, false, true);

	/* reset sequence */
	fimc_write(0x0, EXYNOS_CIFCNTSEQ);

	/* Scaler disable */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
	fimc_write(cfg, EXYNOS_CISCCTRL);

	/* Disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* Disable frame end irq by setting the disable bit */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}
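
/*
 * Probe one FIMC instance: look up platform data and per-SoC driver data,
 * acquire and parent the clocks, map the registers, request the interrupt
 * and register the block as an IPP driver with source/destination ops.
 */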
static int fimc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx;
	struct clk *parent_clk;
	struct resource *res;
	struct exynos_drm_ippdrv *ippdrv;
	struct exynos_drm_fimc_pdata *pdata;
	struct fimc_driverdata *ddata;
	int ret;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(dev, "no platform data specified.\n");
		return -EINVAL;
	}

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ddata = (struct fimc_driverdata *)
		platform_get_device_id(pdev)->driver_data;

	/* clock control */
	ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
	if (IS_ERR(ctx->sclk_fimc_clk)) {
		dev_err(dev, "failed to get src fimc clock.\n");
		return PTR_ERR(ctx->sclk_fimc_clk);
	}
	clk_enable(ctx->sclk_fimc_clk);

	ctx->fimc_clk = devm_clk_get(dev, "fimc");
	if (IS_ERR(ctx->fimc_clk)) {
		dev_err(dev, "failed to get fimc clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->fimc_clk);
	}

	ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
	if (IS_ERR(ctx->wb_clk)) {
		dev_err(dev, "failed to get writeback a clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_clk);
	}

	ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
	if (IS_ERR(ctx->wb_b_clk)) {
		dev_err(dev, "failed to get writeback b clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_b_clk);
	}

	parent_clk = devm_clk_get(dev, ddata->parent_clk);
	if (IS_ERR(parent_clk)) {
		dev_err(dev, "failed to get parent clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(parent_clk);
	}

	if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
		dev_err(dev, "failed to set parent.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return -EINVAL;
	}

	devm_clk_put(dev, parent_clk);
	clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);

	/* resource memory */
	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
	if (IS_ERR(ctx->regs))
		return PTR_ERR(ctx->regs);

	/* resource irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to request irq resource.\n");
		return -ENOENT;
	}

	ctx->irq = res->start;
	ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
		IRQF_ONESHOT, "drm_fimc", ctx);
	if (ret < 0) {
		dev_err(dev, "failed to request irq.\n");
		return ret;
	}

	/* context initialization */
	ctx->id = pdev->id;
	ctx->pol = pdata->pol;
	ctx->ddata = ddata;

	ippdrv = &ctx->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
	ippdrv->check_property = fimc_ippdrv_check_property;
	ippdrv->reset = fimc_ippdrv_reset;
	ippdrv->start = fimc_ippdrv_start;
	ippdrv->stop = fimc_ippdrv_stop;
	ret = fimc_init_prop_list(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to init property list.\n");
		goto err_get_irq;
	}

	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
		(int)ippdrv);

	mutex_init(&ctx->lock);
	platform_set_drvdata(pdev, ctx);

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm fimc device.\n");
		goto err_ippdrv_register;
	}

	dev_info(&pdev->dev, "drm fimc registered successfully.\n");

	return 0;

err_ippdrv_register:
	devm_kfree(dev, ippdrv->prop_list);
	pm_runtime_disable(dev);
err_get_irq:
	free_irq(ctx->irq, ctx);

	return ret;
}

static int fimc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;

	devm_kfree(dev, ippdrv->prop_list);
	exynos_drm_ippdrv_unregister(ippdrv);
	mutex_destroy(&ctx->lock);

	pm_runtime_set_suspended(dev);
	pm_runtime_disable(dev);

	free_irq(ctx->irq, ctx);

	return 0;
}
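
/*
 * System sleep PM: gate the FIMC clocks on suspend and ungate them on resume,
 * skipping the clock handling when the device is already runtime-suspended.
 */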
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	if (pm_runtime_suspended(dev))
		return 0;

	return fimc_clk_ctrl(ctx, false);
}

static int fimc_resume(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	if (!pm_runtime_suspended(dev))
		return fimc_clk_ctrl(ctx, true);

	return 0;
}
#endif
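
/* Runtime PM simply gates and ungates the FIMC clocks. */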
#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_suspend(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	return fimc_clk_ctrl(ctx, false);
}

static int fimc_runtime_resume(struct device *dev)
{
	struct fimc_context *ctx = get_fimc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	return fimc_clk_ctrl(ctx, true);
}
#endif
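
/* Per-SoC driver data: only the parent clock of sclk_fimc differs. */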
static struct fimc_driverdata exynos4210_fimc_data = {
	.parent_clk = "mout_mpll",
};

static struct fimc_driverdata exynos4410_fimc_data = {
	.parent_clk = "mout_mpll_user",
};

static struct platform_device_id fimc_driver_ids[] = {
	{
		.name = "exynos4210-fimc",
		.driver_data = (unsigned long)&exynos4210_fimc_data,
	}, {
		.name = "exynos4412-fimc",
		.driver_data = (unsigned long)&exynos4410_fimc_data,
	},
	{},
};

MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
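
/* Hook both system sleep and runtime PM callbacks into one dev_pm_ops. */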
static const struct dev_pm_ops fimc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};

struct platform_driver fimc_driver = {
	.probe = fimc_probe,
	.remove = fimc_remove,
	.id_table = fimc_driver_ids,
	.driver = {
		.name = "exynos-drm-fimc",
		.owner = THIS_MODULE,
		.pm = &fimc_pm_ops,
	},
};