/* sh_veu.c — sh-mobile VEU mem2mem driver (page header and line-number gutter from extraction removed) */
  1. /*
  2. * sh-mobile VEU mem2mem driver
  3. *
  4. * Copyright (C) 2012 Renesas Electronics Corporation
  5. * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
  6. * Copyright (C) 2008 Magnus Damm
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the version 2 of the GNU General Public License as
  10. * published by the Free Software Foundation
  11. */
  12. #include <linux/fs.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/io.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/slab.h>
  20. #include <linux/types.h>
  21. #include <linux/videodev2.h>
  22. #include <media/v4l2-dev.h>
  23. #include <media/v4l2-device.h>
  24. #include <media/v4l2-ioctl.h>
  25. #include <media/v4l2-mem2mem.h>
  26. #include <media/videobuf2-dma-contig.h>
  27. #define VEU_STR 0x00 /* start register */
  28. #define VEU_SWR 0x10 /* src: line length */
  29. #define VEU_SSR 0x14 /* src: image size */
  30. #define VEU_SAYR 0x18 /* src: y/rgb plane address */
  31. #define VEU_SACR 0x1c /* src: c plane address */
  32. #define VEU_BSSR 0x20 /* bundle mode register */
  33. #define VEU_EDWR 0x30 /* dst: line length */
  34. #define VEU_DAYR 0x34 /* dst: y/rgb plane address */
  35. #define VEU_DACR 0x38 /* dst: c plane address */
  36. #define VEU_TRCR 0x50 /* transform control */
  37. #define VEU_RFCR 0x54 /* resize scale */
  38. #define VEU_RFSR 0x58 /* resize clip */
  39. #define VEU_ENHR 0x5c /* enhance */
  40. #define VEU_FMCR 0x70 /* filter mode */
  41. #define VEU_VTCR 0x74 /* lowpass vertical */
  42. #define VEU_HTCR 0x78 /* lowpass horizontal */
  43. #define VEU_APCR 0x80 /* color match */
  44. #define VEU_ECCR 0x84 /* color replace */
  45. #define VEU_AFXR 0x90 /* fixed mode */
  46. #define VEU_SWPR 0x94 /* swap */
  47. #define VEU_EIER 0xa0 /* interrupt mask */
  48. #define VEU_EVTR 0xa4 /* interrupt event */
  49. #define VEU_STAR 0xb0 /* status */
  50. #define VEU_BSRR 0xb4 /* reset */
  51. #define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
  52. #define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
  53. #define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
  54. #define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
  55. #define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
  56. #define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
  57. #define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
  58. #define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
  59. #define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
  60. #define VEU_COFFR 0x224 /* color conversion offset */
  61. #define VEU_CBR 0x228 /* color conversion clip */
  62. /*
  63. * 4092x4092 max size is the normal case. In some cases it can be reduced to
  64. * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
  65. */
  66. #define MAX_W 4092
  67. #define MAX_H 4092
  68. #define MIN_W 8
  69. #define MIN_H 8
  70. #define ALIGN_W 4
  71. /* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
  72. #define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)
  73. #define MEM2MEM_DEF_TRANSLEN 1
  74. struct sh_veu_dev;
  75. struct sh_veu_file {
  76. struct sh_veu_dev *veu_dev;
  77. bool cfg_needed;
  78. };
  79. struct sh_veu_format {
  80. char *name;
  81. u32 fourcc;
  82. unsigned int depth;
  83. unsigned int ydepth;
  84. };
  85. /* video data format */
  86. struct sh_veu_vfmt {
  87. /* Replace with v4l2_rect */
  88. struct v4l2_rect frame;
  89. unsigned int bytesperline;
  90. unsigned int offset_y;
  91. unsigned int offset_c;
  92. const struct sh_veu_format *fmt;
  93. };
  94. struct sh_veu_dev {
  95. struct v4l2_device v4l2_dev;
  96. struct video_device vdev;
  97. struct v4l2_m2m_dev *m2m_dev;
  98. struct device *dev;
  99. struct v4l2_m2m_ctx *m2m_ctx;
  100. struct sh_veu_vfmt vfmt_out;
  101. struct sh_veu_vfmt vfmt_in;
  102. /* Only single user per direction so far */
  103. struct sh_veu_file *capture;
  104. struct sh_veu_file *output;
  105. struct mutex fop_lock;
  106. void __iomem *base;
  107. struct vb2_alloc_ctx *alloc_ctx;
  108. spinlock_t lock;
  109. bool is_2h;
  110. unsigned int xaction;
  111. bool aborting;
  112. };
  113. enum sh_veu_fmt_idx {
  114. SH_VEU_FMT_NV12,
  115. SH_VEU_FMT_NV16,
  116. SH_VEU_FMT_NV24,
  117. SH_VEU_FMT_RGB332,
  118. SH_VEU_FMT_RGB444,
  119. SH_VEU_FMT_RGB565,
  120. SH_VEU_FMT_RGB666,
  121. SH_VEU_FMT_RGB24,
  122. };
  123. #define VGA_WIDTH 640
  124. #define VGA_HEIGHT 480
  125. #define DEFAULT_IN_WIDTH VGA_WIDTH
  126. #define DEFAULT_IN_HEIGHT VGA_HEIGHT
  127. #define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
  128. #define DEFAULT_OUT_WIDTH VGA_WIDTH
  129. #define DEFAULT_OUT_HEIGHT VGA_HEIGHT
  130. #define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
  131. /*
  132. * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
  133. * aligned for NV24.
  134. */
  135. static const struct sh_veu_format sh_veu_fmt[] = {
  136. [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
  137. [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
  138. [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
  139. [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
  140. [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
  141. [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
  142. [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
  143. [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
  144. };
  145. #define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
  146. .frame = { \
  147. .width = VGA_WIDTH, \
  148. .height = VGA_HEIGHT, \
  149. }, \
  150. .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
  151. .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
  152. }
  153. #define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
  154. .frame = { \
  155. .width = VGA_WIDTH, \
  156. .height = VGA_HEIGHT, \
  157. }, \
  158. .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
  159. .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
  160. }
  161. /*
  162. * TODO: add support for further output formats:
  163. * SH_VEU_FMT_NV12,
  164. * SH_VEU_FMT_NV16,
  165. * SH_VEU_FMT_NV24,
  166. * SH_VEU_FMT_RGB332,
  167. * SH_VEU_FMT_RGB444,
  168. * SH_VEU_FMT_RGB666,
  169. * SH_VEU_FMT_RGB24,
  170. */
  171. static const int sh_veu_fmt_out[] = {
  172. SH_VEU_FMT_RGB565,
  173. };
  174. /*
  175. * TODO: add support for further input formats:
  176. * SH_VEU_FMT_NV16,
  177. * SH_VEU_FMT_NV24,
  178. * SH_VEU_FMT_RGB565,
  179. * SH_VEU_FMT_RGB666,
  180. * SH_VEU_FMT_RGB24,
  181. */
  182. static const int sh_veu_fmt_in[] = {
  183. SH_VEU_FMT_NV12,
  184. };
  185. static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
  186. {
  187. switch (fourcc) {
  188. default:
  189. BUG();
  190. case V4L2_PIX_FMT_NV12:
  191. case V4L2_PIX_FMT_NV16:
  192. case V4L2_PIX_FMT_NV24:
  193. return V4L2_COLORSPACE_JPEG;
  194. case V4L2_PIX_FMT_RGB332:
  195. case V4L2_PIX_FMT_RGB444:
  196. case V4L2_PIX_FMT_RGB565:
  197. case V4L2_PIX_FMT_BGR666:
  198. case V4L2_PIX_FMT_RGB24:
  199. return V4L2_COLORSPACE_SRGB;
  200. }
  201. }
/* Read a 32-bit VEU register at byte offset @reg from the MMIO base. */
static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
{
	return ioread32(veu->base + reg);
}
/* Write @value to the 32-bit VEU register at byte offset @reg. */
static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
			     u32 value)
{
	iowrite32(value, veu->base + reg);
}
  211. /* ========== mem2mem callbacks ========== */
/*
 * mem2mem .job_abort callback: only raise a flag here; the actual
 * cancellation happens in the interrupt handler, which checks "aborting".
 */
static void sh_veu_job_abort(void *priv)
{
	struct sh_veu_dev *veu = priv;

	/* Will cancel the transaction in the next interrupt handler */
	veu->aborting = true;
}
/* mem2mem .lock callback: serialise against the file operations. */
static void sh_veu_lock(void *priv)
{
	struct sh_veu_dev *veu = priv;

	mutex_lock(&veu->fop_lock);
}
/* mem2mem .unlock callback: counterpart of sh_veu_lock(). */
static void sh_veu_unlock(void *priv)
{
	struct sh_veu_dev *veu = priv;

	mutex_unlock(&veu->fop_lock);
}
/*
 * Program the source and destination plane addresses and start one VEU
 * transfer.  The Y/RGB address is the DMA buffer base plus the offset
 * pre-computed in sh_veu_colour_offset(); the C-plane register is only
 * programmed for planar formats (offset_c != 0), otherwise cleared.
 */
static void sh_veu_process(struct sh_veu_dev *veu,
			   struct vb2_buffer *src_buf,
			   struct vb2_buffer *dst_buf)
{
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);

	sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
	sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
			 addr + veu->vfmt_out.offset_c : 0);
	dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);

	addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
	sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
			 addr + veu->vfmt_in.offset_c : 0);
	dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);

	/* kick the transfer, then unmask the completion interrupt */
	sh_veu_reg_write(veu, VEU_STR, 1);
	sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
}
  249. /**
  250. * sh_veu_device_run() - prepares and starts the device
  251. *
  252. * This will be called by the framework when it decides to schedule a particular
  253. * instance.
  254. */
  255. static void sh_veu_device_run(void *priv)
  256. {
  257. struct sh_veu_dev *veu = priv;
  258. struct vb2_buffer *src_buf, *dst_buf;
  259. src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
  260. dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
  261. if (src_buf && dst_buf)
  262. sh_veu_process(veu, src_buf, dst_buf);
  263. }
  264. /* ========== video ioctls ========== */
  265. static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  266. enum v4l2_buf_type type)
  267. {
  268. return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
  269. veu_file == veu->capture) ||
  270. (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
  271. veu_file == veu->output);
  272. }
  273. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  274. struct vb2_queue *dst_vq);
  275. /*
  276. * It is not unusual to have video nodes open()ed multiple times. While some
  277. * V4L2 operations are non-intrusive, like querying formats and various
  278. * parameters, others, like setting formats, starting and stopping streaming,
  279. * queuing and dequeuing buffers, directly affect hardware configuration and /
  280. * or execution. This function verifies availability of the requested interface
  281. * and, if available, reserves it for the requesting user.
  282. */
  283. static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  284. enum v4l2_buf_type type)
  285. {
  286. struct sh_veu_file **stream;
  287. switch (type) {
  288. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  289. stream = &veu->capture;
  290. break;
  291. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  292. stream = &veu->output;
  293. break;
  294. default:
  295. return -EINVAL;
  296. }
  297. if (*stream == veu_file)
  298. return 0;
  299. if (*stream)
  300. return -EBUSY;
  301. *stream = veu_file;
  302. return 0;
  303. }
/*
 * Lazily create the shared mem2mem context.  A no-op when the context
 * already exists, so it is safe to call from several ioctl paths.
 */
static int sh_veu_context_init(struct sh_veu_dev *veu)
{
	if (veu->m2m_ctx)
		return 0;

	veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
					 sh_veu_queue_init);
	if (IS_ERR(veu->m2m_ctx))
		return PTR_ERR(veu->m2m_ctx);

	return 0;
}
/* VIDIOC_QUERYCAP: report driver identity and the M2M/streaming caps. */
static int sh_veu_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
	strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
  324. static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
  325. {
  326. if (f->index >= fmt_num)
  327. return -EINVAL;
  328. strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
  329. f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
  330. return 0;
  331. }
/* CAPTURE queue carries the conversion result, hence the "out" table. */
static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
}
/* OUTPUT queue carries the source data, hence the "in" table. */
static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
}
  342. static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
  343. enum v4l2_buf_type type)
  344. {
  345. switch (type) {
  346. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  347. return &veu->vfmt_out;
  348. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  349. return &veu->vfmt_in;
  350. default:
  351. return NULL;
  352. }
  353. }
/*
 * Common G_FMT implementation: copy the stored per-direction format into
 * the user's v4l2_pix_format.  sizeimage scales bytesperline (which only
 * covers the Y plane) by depth/ydepth to account for the chroma plane.
 *
 * NOTE(review): vfmt is dereferenced without a NULL check — presumably the
 * V4L2 core rejects unsupported buffer types before this handler runs;
 * confirm against the ioctl framework.
 */
static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;

	vfmt = sh_veu_get_vfmt(veu, f->type);

	pix->width = vfmt->frame.width;
	pix->height = vfmt->frame.height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = vfmt->fmt->fourcc;
	pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
	pix->bytesperline = vfmt->bytesperline;
	pix->sizeimage = vfmt->bytesperline * pix->height *
		vfmt->fmt->depth / vfmt->fmt->ydepth;
	pix->priv = 0;
	dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
		f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);

	return 0;
}
/* G_FMT on the OUTPUT (source) queue; priv is the struct sh_veu_file. */
static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}
/* G_FMT on the CAPTURE (result) queue; priv is the struct sh_veu_file. */
static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}
/*
 * Common TRY_FMT implementation: clamp/align the requested geometry to
 * hardware limits and fill in the derived pix fields for format @fmt.
 * Never fails — out-of-range values are corrected, per the V4L2 spec.
 */
static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	unsigned int y_bytes_used;

	/*
	 * V4L2 specification suggests, that the driver should correct the
	 * format struct if any of the dimensions is unsupported
	 */
	switch (pix->field) {
	default:
	case V4L2_FIELD_ANY:
		pix->field = V4L2_FIELD_NONE;
		/* fall through: continue handling V4L2_FIELD_NONE */
	case V4L2_FIELD_NONE:
		break;
	}

	/* clamp to MIN/MAX and align width to ALIGN_W (2^4? no: 2^ALIGN_W) */
	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
			      &pix->height, MIN_H, MAX_H, 0, 0);

	/* bytesperline covers the Y plane only; raise it if too small */
	y_bytes_used = (pix->width * fmt->ydepth) >> 3;

	if (pix->bytesperline < y_bytes_used)
		pix->bytesperline = y_bytes_used;
	/* scale by depth/ydepth to include the chroma plane */
	pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;

	pix->pixelformat = fmt->fourcc;
	pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
	pix->priv = 0;
	pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);

	return 0;
}
  411. static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
  412. {
  413. const int *fmt;
  414. int i, n, dflt;
  415. pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
  416. switch (f->type) {
  417. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  418. fmt = sh_veu_fmt_out;
  419. n = ARRAY_SIZE(sh_veu_fmt_out);
  420. dflt = DEFAULT_OUT_FMTIDX;
  421. break;
  422. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  423. default:
  424. fmt = sh_veu_fmt_in;
  425. n = ARRAY_SIZE(sh_veu_fmt_in);
  426. dflt = DEFAULT_IN_FMTIDX;
  427. break;
  428. }
  429. for (i = 0; i < n; i++)
  430. if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
  431. return &sh_veu_fmt[fmt[i]];
  432. return &sh_veu_fmt[dflt];
  433. }
/* TRY_FMT on the CAPTURE queue: resolve the format, then validate. */
static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	const struct sh_veu_format *fmt;

	fmt = sh_veu_find_fmt(f);
	if (!fmt)
		/* wrong buffer type */
		return -EINVAL;

	return sh_veu_try_fmt(f, fmt);
}
/* TRY_FMT on the OUTPUT queue: resolve the format, then validate. */
static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	const struct sh_veu_format *fmt;

	fmt = sh_veu_find_fmt(f);
	if (!fmt)
		/* wrong buffer type */
		return -EINVAL;

	return sh_veu_try_fmt(f, fmt);
}
/*
 * Pre-compute the Y and C plane offsets inside the DMA buffer for @vfmt,
 * taking the crop rectangle (frame.left/top) into account.  For planar
 * NV* formats the C plane starts after a 16-byte-aligned Y plane; RGB
 * formats have no C plane (offset_c == 0 disables the VEU_?ACR write in
 * sh_veu_process()).
 *
 * NOTE(review): the byte offset is computed from veu->vfmt_out (depth and
 * bytesperline) even when @vfmt is the input format — presumably only the
 * output side can be cropped so left/top are 0 for the input; confirm.
 */
static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
{
	/* dst_left and dst_top validity will be verified in CROP / COMPOSE */
	unsigned int left = vfmt->frame.left & ~0x03; /* 4-pixel alignment */
	unsigned int top = vfmt->frame.top;
	dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) +
		top * veu->vfmt_out.bytesperline;
	unsigned int y_line;

	vfmt->offset_y = offset;

	switch (vfmt->fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		y_line = ALIGN(vfmt->frame.width, 16);
		vfmt->offset_c = offset + y_line * vfmt->frame.height;
		break;
	case V4L2_PIX_FMT_RGB332:
	case V4L2_PIX_FMT_RGB444:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_BGR666:
	case V4L2_PIX_FMT_RGB24:
		vfmt->offset_c = 0;
		break;
	default:
		BUG();
	}
}
/*
 * Common S_FMT implementation.  Must run after try_fmt (the wrappers
 * guarantee this), so the pix fields are already validated.  Fails with
 * -EBUSY while buffers are allocated on the queue, stores the new
 * geometry, recomputes plane offsets and flags that the hardware needs
 * reconfiguring on the next STREAMON.
 */
static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;
	struct vb2_queue *vq;
	int ret = sh_veu_context_init(veu);
	if (ret < 0)
		return ret;

	vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	vfmt = sh_veu_get_vfmt(veu, f->type);
	/* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */

	vfmt->fmt = sh_veu_find_fmt(f);
	/* vfmt->fmt != NULL following the same argument as above */
	vfmt->frame.width = pix->width;
	vfmt->frame.height = pix->height;
	vfmt->bytesperline = pix->bytesperline;

	sh_veu_colour_offset(veu, vfmt);

	/*
	 * We could also verify and require configuration only if any parameters
	 * actually have changed, but it is unlikely, that the user requests the
	 * same configuration several times without closing the device.
	 */
	veu_file->cfg_needed = true;

	dev_dbg(veu->dev,
		"Setting format for type %d, wxh: %dx%d, fmt: %x\n",
		f->type, pix->width, pix->height, vfmt->fmt->fourcc);

	return 0;
}
/* S_FMT on the CAPTURE queue: validate via try_fmt first, then apply. */
static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	return sh_veu_s_fmt(priv, f);
}
/* S_FMT on the OUTPUT queue: validate via try_fmt first, then apply. */
static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = sh_veu_try_fmt_vid_out(file, priv, f);
	if (ret)
		return ret;

	return sh_veu_s_fmt(priv, f);
}
/*
 * VIDIOC_REQBUFS: make sure the m2m context exists, reserve the requested
 * direction for this user, then hand over to the mem2mem helper.
 */
static int sh_veu_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct sh_veu_file *veu_file = priv;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	int ret = sh_veu_context_init(veu);
	if (ret < 0)
		return ret;

	ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
	if (ret < 0)
		return ret;

	return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
}
/* VIDIOC_QUERYBUF: only the owner of the direction may query buffers. */
static int sh_veu_querybuf(struct file *file, void *priv,
			   struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
}
/* VIDIOC_QBUF: only the owner of the direction may queue buffers. */
static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
}
/* VIDIOC_DQBUF: only the owner of the direction may dequeue buffers. */
static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
}
  569. static void sh_veu_calc_scale(struct sh_veu_dev *veu,
  570. int size_in, int size_out, int crop_out,
  571. u32 *mant, u32 *frac, u32 *rep)
  572. {
  573. u32 fixpoint;
  574. /* calculate FRAC and MANT */
  575. *rep = *mant = *frac = 0;
  576. if (size_in == size_out) {
  577. if (crop_out != size_out)
  578. *mant = 1; /* needed for cropping */
  579. return;
  580. }
  581. /* VEU2H special upscale */
  582. if (veu->is_2h && size_out > size_in) {
  583. u32 fixpoint = (4096 * size_in) / size_out;
  584. *mant = fixpoint / 4096;
  585. *frac = (fixpoint - (*mant * 4096)) & ~0x07;
  586. switch (*frac) {
  587. case 0x800:
  588. *rep = 1;
  589. break;
  590. case 0x400:
  591. *rep = 3;
  592. break;
  593. case 0x200:
  594. *rep = 7;
  595. break;
  596. }
  597. if (*rep)
  598. return;
  599. }
  600. fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
  601. *mant = fixpoint / 4096;
  602. *frac = fixpoint - (*mant * 4096);
  603. if (*frac & 0x07) {
  604. /*
  605. * FIXME: do we really have to round down twice in the
  606. * up-scaling case?
  607. */
  608. *frac &= ~0x07;
  609. if (size_out > size_in)
  610. *frac -= 8; /* round down if scaling up */
  611. else
  612. *frac += 8; /* round up if scaling down */
  613. }
  614. }
/*
 * Program the vertical scale and clip fields (upper halves of VEU_RFCR /
 * VEU_RFSR) and return the number of input lines, aligned to 4.
 */
static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
		(((mant << 12) | frac) << 16);
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
		(((rep << 12) | crop_out) << 16);
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}
/*
 * Program the horizontal scale and clip fields (lower halves of VEU_RFCR /
 * VEU_RFSR) and return the number of input pixels, aligned to 4.
 */
static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
		(mant << 12) | frac;
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
		(rep << 12) | crop_out;
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}
/*
 * Full hardware (re)configuration for the current format pair: reset the
 * VEU, program geometry, strides, scaling, byte-swap and transform
 * control, and — on VEU2H — the YCbCr->RGB colour conversion matrix.
 * Called from STREAMON when a new format has been set.
 */
static void sh_veu_configure(struct sh_veu_dev *veu)
{
	u32 src_width, src_stride, src_height;
	u32 dst_width, dst_stride, dst_height;
	u32 real_w, real_h;

	/* reset VEU */
	sh_veu_reg_write(veu, VEU_BSRR, 0x100);

	src_width = veu->vfmt_in.frame.width;
	src_height = veu->vfmt_in.frame.height;
	/* source Y plane is kept 16-byte aligned, cf. sh_veu_colour_offset() */
	src_stride = ALIGN(veu->vfmt_in.frame.width, 16);

	dst_width = real_w = veu->vfmt_out.frame.width;
	dst_height = real_h = veu->vfmt_out.frame.height;
	/* Datasheet is unclear - whether it's always number of bytes or not */
	dst_stride = veu->vfmt_out.bytesperline;

	/*
	 * So far real_w == dst_width && real_h == dst_height, but it wasn't
	 * necessarily the case in the original vidix driver, so, it may change
	 * here in the future too.
	 */
	src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
	src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);

	sh_veu_reg_write(veu, VEU_SWR, src_stride);
	sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
	sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */

	sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
	sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */

	/* byte/word swap setting — magic value, see the VEU datasheet */
	sh_veu_reg_write(veu, VEU_SWPR, 0x67);
	/* transform control — magic value, see the VEU datasheet */
	sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);

	if (veu->is_2h) {
		/* YCbCr -> RGB conversion matrix and offset for VEU2H */
		sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
		sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR02, 0x0000);

		sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
		sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);

		sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
		sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR22, 0x1023);

		sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
	}
}
/*
 * VIDIOC_STREAMON: only the direction's owner may start streaming.  If a
 * format change was flagged by S_FMT, reprogram the hardware and reset
 * the transaction state before starting.
 */
static int sh_veu_streamon(struct file *file, void *priv,
			   enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	if (veu_file->cfg_needed) {
		struct sh_veu_dev *veu = veu_file->veu_dev;
		veu_file->cfg_needed = false;
		sh_veu_configure(veu_file->veu_dev);
		veu->xaction = 0;
		veu->aborting = false;
	}

	return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
}
/* VIDIOC_STREAMOFF: only the direction's owner may stop streaming. */
static int sh_veu_streamoff(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
}
/* V4L2 ioctl dispatch table for the mem2mem video node. */
static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
	.vidioc_querycap	= sh_veu_querycap,
	.vidioc_enum_fmt_vid_cap	= sh_veu_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= sh_veu_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= sh_veu_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= sh_veu_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_out	= sh_veu_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out	= sh_veu_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out	= sh_veu_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out	= sh_veu_s_fmt_vid_out,
	.vidioc_reqbufs		= sh_veu_reqbufs,
	.vidioc_querybuf	= sh_veu_querybuf,
	.vidioc_qbuf		= sh_veu_qbuf,
	.vidioc_dqbuf		= sh_veu_dqbuf,
	.vidioc_streamon	= sh_veu_streamon,
	.vidioc_streamoff	= sh_veu_streamoff,
};
  726. /* ========== Queue operations ========== */
/*
 * vb2 .queue_setup callback: decide buffer count and per-plane size.
 * With a format hint (CREATE_BUFS path) the hint is cross-checked against
 * what try_fmt would produce; without one (REQBUFS path) the stored
 * per-direction format is used.  The total allocation is capped at
 * VIDEO_MEM_LIMIT.
 *
 * NOTE(review): the memory-limit clamp can reduce *nbuffers below the
 * minimum of 2 enforced just above it — presumably acceptable for huge
 * frames, but verify against the vb2 minimum-buffers expectations.
 */
static int sh_veu_queue_setup(struct vb2_queue *vq,
			      const struct v4l2_format *f,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], void *alloc_ctxs[])
{
	struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
	struct sh_veu_vfmt *vfmt;
	unsigned int size, count = *nbuffers;

	if (f) {
		const struct v4l2_pix_format *pix = &f->fmt.pix;
		const struct sh_veu_format *fmt = sh_veu_find_fmt(f);
		struct v4l2_format ftmp = *f;

		if (fmt->fourcc != pix->pixelformat)
			return -EINVAL;

		sh_veu_try_fmt(&ftmp, fmt);
		if (ftmp.fmt.pix.width != pix->width ||
		    ftmp.fmt.pix.height != pix->height)
			return -EINVAL;

		size = pix->bytesperline ? pix->bytesperline * pix->height :
			pix->width * pix->height * fmt->depth >> 3;
	} else {
		vfmt = sh_veu_get_vfmt(veu, vq->type);
		size = vfmt->bytesperline * vfmt->frame.height;
	}

	if (count < 2)
		*nbuffers = count = 2;

	if (size * count > VIDEO_MEM_LIMIT) {
		count = VIDEO_MEM_LIMIT / size;
		*nbuffers = count;
	}

	*nplanes = 1;
	sizes[0] = size;
	alloc_ctxs[0] = veu->alloc_ctx;

	dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);

	return 0;
}
  763. static int sh_veu_buf_prepare(struct vb2_buffer *vb)
  764. {
  765. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  766. struct sh_veu_vfmt *vfmt;
  767. unsigned int sizeimage;
  768. vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
  769. sizeimage = vfmt->bytesperline * vfmt->frame.height *
  770. vfmt->fmt->depth / vfmt->fmt->ydepth;
  771. if (vb2_plane_size(vb, 0) < sizeimage) {
  772. dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
  773. __func__, vb2_plane_size(vb, 0), sizeimage);
  774. return -EINVAL;
  775. }
  776. vb2_set_plane_payload(vb, 0, sizeimage);
  777. return 0;
  778. }
  779. static void sh_veu_buf_queue(struct vb2_buffer *vb)
  780. {
  781. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  782. dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
  783. v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
  784. }
  785. static void sh_veu_wait_prepare(struct vb2_queue *q)
  786. {
  787. sh_veu_unlock(vb2_get_drv_priv(q));
  788. }
  789. static void sh_veu_wait_finish(struct vb2_queue *q)
  790. {
  791. sh_veu_lock(vb2_get_drv_priv(q));
  792. }
/* videobuf2 queue operations shared by the source and destination queues */
static const struct vb2_ops sh_veu_qops = {
	.queue_setup	= sh_veu_queue_setup,
	.buf_prepare	= sh_veu_buf_prepare,
	.buf_queue	= sh_veu_buf_queue,
	.wait_prepare	= sh_veu_wait_prepare,
	.wait_finish	= sh_veu_wait_finish,
};
  800. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  801. struct vb2_queue *dst_vq)
  802. {
  803. int ret;
  804. memset(src_vq, 0, sizeof(*src_vq));
  805. src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
  806. src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  807. src_vq->drv_priv = priv;
  808. src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  809. src_vq->ops = &sh_veu_qops;
  810. src_vq->mem_ops = &vb2_dma_contig_memops;
  811. ret = vb2_queue_init(src_vq);
  812. if (ret < 0)
  813. return ret;
  814. memset(dst_vq, 0, sizeof(*dst_vq));
  815. dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  816. dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  817. dst_vq->drv_priv = priv;
  818. dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  819. dst_vq->ops = &sh_veu_qops;
  820. dst_vq->mem_ops = &vb2_dma_contig_memops;
  821. return vb2_queue_init(dst_vq);
  822. }
  823. /* ========== File operations ========== */
  824. static int sh_veu_open(struct file *file)
  825. {
  826. struct sh_veu_dev *veu = video_drvdata(file);
  827. struct sh_veu_file *veu_file;
  828. veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
  829. if (!veu_file)
  830. return -ENOMEM;
  831. veu_file->veu_dev = veu;
  832. veu_file->cfg_needed = true;
  833. file->private_data = veu_file;
  834. pm_runtime_get_sync(veu->dev);
  835. dev_dbg(veu->dev, "Created instance %p\n", veu_file);
  836. return 0;
  837. }
/*
 * File release: drop the runtime-PM reference, release any vb2 queue this
 * handle owned and, once neither queue has an owner, destroy the shared
 * mem2mem context.
 */
static int sh_veu_release(struct file *file)
{
	struct sh_veu_dev *veu = video_drvdata(file);
	struct sh_veu_file *veu_file = file->private_data;

	dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);

	pm_runtime_put(veu->dev);

	/* If this handle owned the capture queue, give it up */
	if (veu_file == veu->capture) {
		veu->capture = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
	}

	/* If this handle owned the output queue, give it up */
	if (veu_file == veu->output) {
		veu->output = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
	}

	/* Last queue owner gone: the m2m context can be destroyed */
	if (!veu->output && !veu->capture && veu->m2m_ctx) {
		v4l2_m2m_ctx_release(veu->m2m_ctx);
		veu->m2m_ctx = NULL;
	}

	kfree(veu_file);

	return 0;
}
  859. static unsigned int sh_veu_poll(struct file *file,
  860. struct poll_table_struct *wait)
  861. {
  862. struct sh_veu_file *veu_file = file->private_data;
  863. return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
  864. }
  865. static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
  866. {
  867. struct sh_veu_file *veu_file = file->private_data;
  868. return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
  869. }
/* V4L2 file operations; ioctls are serialised via video_ioctl2 */
static const struct v4l2_file_operations sh_veu_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_veu_open,
	.release	= sh_veu_release,
	.poll		= sh_veu_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= sh_veu_mmap,
};
/* Template video_device, copied into each sh_veu_dev at probe time */
static const struct video_device sh_veu_videodev = {
	.name		= "sh-veu",
	.fops		= &sh_veu_fops,
	.ioctl_ops	= &sh_veu_ioctl_ops,
	.minor		= -1,	/* let the core pick the minor number */
	.release	= video_device_release_empty, /* embedded, not kfreed */
	.vfl_dir	= VFL_DIR_M2M,
};
/* mem2mem job hooks (defined earlier in this file) */
static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
	.device_run	= sh_veu_device_run,
	.job_abort	= sh_veu_job_abort,
};
  890. static irqreturn_t sh_veu_bh(int irq, void *dev_id)
  891. {
  892. struct sh_veu_dev *veu = dev_id;
  893. if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
  894. v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
  895. veu->xaction = 0;
  896. } else {
  897. sh_veu_device_run(veu);
  898. }
  899. return IRQ_HANDLED;
  900. }
/*
 * Hard-IRQ half of the interrupt handler.
 *
 * Checks the event register for a frame-completion event, quiesces the
 * hardware, acknowledges the event and marks the current source/destination
 * buffer pair as done.  Normally returns IRQ_WAKE_THREAD so sh_veu_bh()
 * can continue or finish the mem2mem job.
 */
static irqreturn_t sh_veu_isr(int irq, void *dev_id)
{
	struct sh_veu_dev *veu = dev_id;
	struct vb2_buffer *dst;
	struct vb2_buffer *src;
	u32 status = sh_veu_reg_read(veu, VEU_EVTR);

	/* bundle read mode not used */
	if (!(status & 1))
		return IRQ_NONE;

	/* disable interrupt in VEU */
	sh_veu_reg_write(veu, VEU_EIER, 0);
	/* halt operation */
	sh_veu_reg_write(veu, VEU_STR, 0);
	/* ack int, write 0 to clear bits */
	sh_veu_reg_write(veu, VEU_EVTR, status & ~1);

	/* conversion completed */
	dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
	src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
	/* NOTE(review): completion with no queued buffers is treated as
	 * not-ours, although the event was already acked above - verify */
	if (!src || !dst)
		return IRQ_NONE;

	/* lock protects buf_done against concurrent queue manipulation */
	spin_lock(&veu->lock);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	spin_unlock(&veu->lock);

	veu->xaction++;

	/* Defer job bookkeeping to the threaded handler unless aborting */
	if (!veu->aborting)
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}
/*
 * Platform probe: map the register window, request the IRQ, register the
 * v4l2 device, DMA allocation context, mem2mem device and video node.
 * Errors unwind in reverse order via the goto labels at the bottom.
 */
static int sh_veu_probe(struct platform_device *pdev)
{
	struct sh_veu_dev *veu;
	struct resource *reg_res;
	struct video_device *vdev;
	int irq, ret;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);

	if (!reg_res || irq <= 0) {
		dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
		return -ENODEV;
	}

	veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
	if (!veu)
		return -ENOMEM;

	/* A VEU2H instance is recognised by its register window size */
	veu->is_2h = resource_size(reg_res) == 0x22c;

	veu->base = devm_request_and_ioremap(&pdev->dev, reg_res);
	if (!veu->base)
		return -ENOMEM;

	/* sh_veu_isr runs in hard-IRQ context, sh_veu_bh in its thread */
	ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
					0, "veu", veu);
	if (ret < 0)
		return ret;

	/* NOTE(review): sh_veu_remove() relies on the platform drvdata
	 * pointing at veu->v4l2_dev - presumably set up by
	 * v4l2_device_register(); confirm */
	ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Error registering v4l2 device\n");
		return ret;
	}

	vdev = &veu->vdev;

	veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(veu->alloc_ctx)) {
		ret = PTR_ERR(veu->alloc_ctx);
		goto einitctx;
	}

	/* Copy the template video_device and attach per-device state */
	*vdev = sh_veu_videodev;
	spin_lock_init(&veu->lock);
	mutex_init(&veu->fop_lock);
	vdev->lock = &veu->fop_lock;

	video_set_drvdata(vdev, veu);

	veu->dev = &pdev->dev;
	veu->vfmt_out = DEFAULT_OUT_VFMT;
	veu->vfmt_in = DEFAULT_IN_VFMT;

	veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
	if (IS_ERR(veu->m2m_dev)) {
		ret = PTR_ERR(veu->m2m_dev);
		v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
		goto em2minit;
	}

	pm_runtime_enable(&pdev->dev);
	/* Power the block up only for the duration of registration */
	pm_runtime_resume(&pdev->dev);
	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	pm_runtime_suspend(&pdev->dev);
	if (ret < 0)
		goto evidreg;

	return ret;

	/* Error unwinding, reverse order of initialisation */
evidreg:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
em2minit:
	vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
einitctx:
	v4l2_device_unregister(&veu->v4l2_dev);
	return ret;
}
/*
 * Platform remove: tear everything down in the reverse order of probe.
 * The sh_veu_dev is recovered from the v4l2_device stored as drvdata.
 */
static int sh_veu_remove(struct platform_device *pdev)
{
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct sh_veu_dev *veu = container_of(v4l2_dev,
					      struct sh_veu_dev, v4l2_dev);

	video_unregister_device(&veu->vdev);
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
	vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
	v4l2_device_unregister(&veu->v4l2_dev);

	return 0;
}
/* No .probe here: the driver is bound via platform_driver_probe() below */
static struct platform_driver __refdata sh_veu_pdrv = {
	.remove		= sh_veu_remove,
	.driver		= {
		.name	= "sh_veu",
		.owner	= THIS_MODULE,
	},
};
/* Module init: bind at registration time; devices are not hot-pluggable */
static int __init sh_veu_init(void)
{
	return platform_driver_probe(&sh_veu_pdrv, sh_veu_probe);
}
/* Module exit: unregister the platform driver */
static void __exit sh_veu_exit(void)
{
	platform_driver_unregister(&sh_veu_pdrv);
}
/* Module entry points and metadata */
module_init(sh_veu_init);
module_exit(sh_veu_exit);

MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");