sh_veu.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250
  1. /*
  2. * sh-mobile VEU mem2mem driver
  3. *
  4. * Copyright (C) 2012 Renesas Electronics Corporation
  5. * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
  6. * Copyright (C) 2008 Magnus Damm
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the version 2 of the GNU General Public License as
  10. * published by the Free Software Foundation
  11. */
  12. #include <linux/err.h>
  13. #include <linux/fs.h>
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/io.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/pm_runtime.h>
  20. #include <linux/slab.h>
  21. #include <linux/types.h>
  22. #include <linux/videodev2.h>
  23. #include <media/v4l2-dev.h>
  24. #include <media/v4l2-device.h>
  25. #include <media/v4l2-ioctl.h>
  26. #include <media/v4l2-mem2mem.h>
  27. #include <media/videobuf2-dma-contig.h>
/* VEU register offsets from the mapped register block base (veu->base) */
#define VEU_STR 0x00		/* start register */
#define VEU_SWR 0x10		/* src: line length */
#define VEU_SSR 0x14		/* src: image size */
#define VEU_SAYR 0x18		/* src: y/rgb plane address */
#define VEU_SACR 0x1c		/* src: c plane address */
#define VEU_BSSR 0x20		/* bundle mode register */
#define VEU_EDWR 0x30		/* dst: line length */
#define VEU_DAYR 0x34		/* dst: y/rgb plane address */
#define VEU_DACR 0x38		/* dst: c plane address */
#define VEU_TRCR 0x50		/* transform control */
#define VEU_RFCR 0x54		/* resize scale */
#define VEU_RFSR 0x58		/* resize clip */
#define VEU_ENHR 0x5c		/* enhance */
#define VEU_FMCR 0x70		/* filter mode */
#define VEU_VTCR 0x74		/* lowpass vertical */
#define VEU_HTCR 0x78		/* lowpass horizontal */
#define VEU_APCR 0x80		/* color match */
#define VEU_ECCR 0x84		/* color replace */
#define VEU_AFXR 0x90		/* fixed mode */
#define VEU_SWPR 0x94		/* swap */
#define VEU_EIER 0xa0		/* interrupt mask */
#define VEU_EVTR 0xa4		/* interrupt event */
#define VEU_STAR 0xb0		/* status */
#define VEU_BSRR 0xb4		/* reset */

/* VEU2H colour-space conversion matrix (used only when veu->is_2h) */
#define VEU_MCR00 0x200		/* color conversion matrix coefficient 00 */
#define VEU_MCR01 0x204		/* color conversion matrix coefficient 01 */
#define VEU_MCR02 0x208		/* color conversion matrix coefficient 02 */
#define VEU_MCR10 0x20c		/* color conversion matrix coefficient 10 */
#define VEU_MCR11 0x210		/* color conversion matrix coefficient 11 */
#define VEU_MCR12 0x214		/* color conversion matrix coefficient 12 */
#define VEU_MCR20 0x218		/* color conversion matrix coefficient 20 */
#define VEU_MCR21 0x21c		/* color conversion matrix coefficient 21 */
#define VEU_MCR22 0x220		/* color conversion matrix coefficient 22 */
#define VEU_COFFR 0x224		/* color conversion offset */
#define VEU_CBR 0x228		/* color conversion clip */

/*
 * 4092x4092 max size is the normal case. In some cases it can be reduced to
 * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
 */
#define MAX_W 4092
#define MAX_H 4092
#define MIN_W 8
#define MIN_H 8
#define ALIGN_W 4

/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)

#define MEM2MEM_DEF_TRANSLEN 1
struct sh_veu_dev;

/* Per-open-file state */
struct sh_veu_file {
	struct sh_veu_dev *veu_dev;	/* device this file handle belongs to */
	bool cfg_needed;		/* set by S_FMT, consumed at STREAMON */
};
/* Description of one pixel format supported by the driver */
struct sh_veu_format {
	char *name;		/* name reported via ENUM_FMT description */
	u32 fourcc;		/* V4L2 fourcc code */
	unsigned int depth;	/* total bits per pixel across all planes */
	unsigned int ydepth;	/* bits per pixel in the Y/RGB plane */
};
/* video data format */
struct sh_veu_vfmt {
	/* Replace with v4l2_rect */
	struct v4l2_rect frame;			/* frame size and left/top offsets */
	unsigned int bytesperline;		/* Y/RGB plane line stride in bytes */
	unsigned int offset_y;			/* byte offset of the Y/RGB plane */
	unsigned int offset_c;			/* byte offset of the C plane, 0 for RGB */
	const struct sh_veu_format *fmt;	/* selected pixel format */
};
/* Per-device driver state */
struct sh_veu_dev {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	struct v4l2_m2m_dev *m2m_dev;
	struct device *dev;
	struct v4l2_m2m_ctx *m2m_ctx;	/* created lazily by sh_veu_context_init() */
	struct sh_veu_vfmt vfmt_out;	/* CAPTURE queue format (scaler output) */
	struct sh_veu_vfmt vfmt_in;	/* OUTPUT queue format (scaler input) */
	/* Only single user per direction so far */
	struct sh_veu_file *capture;	/* current owner of the CAPTURE queue */
	struct sh_veu_file *output;	/* current owner of the OUTPUT queue */
	struct mutex fop_lock;		/* serialises file operations */
	void __iomem *base;		/* mapped VEU register block */
	struct vb2_alloc_ctx *alloc_ctx;	/* dma-contig allocator context */
	spinlock_t lock;
	bool is_2h;			/* VEU2H variant: special upscale + CSC matrix */
	unsigned int xaction;		/* transaction counter, zeroed at STREAMON */
	bool aborting;			/* set by job_abort, cleared at STREAMON */
};
/* Indices into the sh_veu_fmt[] table below */
enum sh_veu_fmt_idx {
	SH_VEU_FMT_NV12,
	SH_VEU_FMT_NV16,
	SH_VEU_FMT_NV24,
	SH_VEU_FMT_RGB332,
	SH_VEU_FMT_RGB444,
	SH_VEU_FMT_RGB565,
	SH_VEU_FMT_RGB666,
	SH_VEU_FMT_RGB24,
};

#define VGA_WIDTH 640
#define VGA_HEIGHT 480

/* Default formats until the user issues S_FMT: NV12 in, RGB565 out @ VGA */
#define DEFAULT_IN_WIDTH VGA_WIDTH
#define DEFAULT_IN_HEIGHT VGA_HEIGHT
#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
#define DEFAULT_OUT_WIDTH VGA_WIDTH
#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
/*
 * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
 * aligned for NV24.
 */
static const struct sh_veu_format sh_veu_fmt[] = {
	[SH_VEU_FMT_NV12]   = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
	[SH_VEU_FMT_NV16]   = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
	[SH_VEU_FMT_NV24]   = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
	[SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
	[SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
	[SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
	[SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
	[SH_VEU_FMT_RGB24]  = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
};
/* Compound-literal initialisers for the default input / output formats */
#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
	.frame = { \
		.width = VGA_WIDTH, \
		.height = VGA_HEIGHT, \
	}, \
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
	.fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
}

#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
	.frame = { \
		.width = VGA_WIDTH, \
		.height = VGA_HEIGHT, \
	}, \
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
	.fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
}
/*
 * TODO: add support for further output formats:
 * SH_VEU_FMT_NV12,
 * SH_VEU_FMT_NV16,
 * SH_VEU_FMT_NV24,
 * SH_VEU_FMT_RGB332,
 * SH_VEU_FMT_RGB444,
 * SH_VEU_FMT_RGB666,
 * SH_VEU_FMT_RGB24,
 */
/* Formats currently allowed on the CAPTURE queue (scaler output) */
static const int sh_veu_fmt_out[] = {
	SH_VEU_FMT_RGB565,
};

/*
 * TODO: add support for further input formats:
 * SH_VEU_FMT_NV16,
 * SH_VEU_FMT_NV24,
 * SH_VEU_FMT_RGB565,
 * SH_VEU_FMT_RGB666,
 * SH_VEU_FMT_RGB24,
 */
/* Formats currently allowed on the OUTPUT queue (scaler input) */
static const int sh_veu_fmt_in[] = {
	SH_VEU_FMT_NV12,
};
  186. static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
  187. {
  188. switch (fourcc) {
  189. default:
  190. BUG();
  191. case V4L2_PIX_FMT_NV12:
  192. case V4L2_PIX_FMT_NV16:
  193. case V4L2_PIX_FMT_NV24:
  194. return V4L2_COLORSPACE_JPEG;
  195. case V4L2_PIX_FMT_RGB332:
  196. case V4L2_PIX_FMT_RGB444:
  197. case V4L2_PIX_FMT_RGB565:
  198. case V4L2_PIX_FMT_BGR666:
  199. case V4L2_PIX_FMT_RGB24:
  200. return V4L2_COLORSPACE_SRGB;
  201. }
  202. }
/* Read one 32-bit VEU register at byte offset @reg */
static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
{
	return ioread32(veu->base + reg);
}
/* Write one 32-bit VEU register at byte offset @reg */
static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
			     u32 value)
{
	iowrite32(value, veu->base + reg);
}
/* ========== mem2mem callbacks ========== */

/* m2m .job_abort: request asynchronous cancellation of the running job */
static void sh_veu_job_abort(void *priv)
{
	struct sh_veu_dev *veu = priv;

	/* Will cancel the transaction in the next interrupt handler */
	veu->aborting = true;
}
/* m2m .lock callback: take the driver's file-operation mutex */
static void sh_veu_lock(void *priv)
{
	struct sh_veu_dev *veu = priv;

	mutex_lock(&veu->fop_lock);
}
/* m2m .unlock callback: release the driver's file-operation mutex */
static void sh_veu_unlock(void *priv)
{
	struct sh_veu_dev *veu = priv;

	mutex_unlock(&veu->fop_lock);
}
/*
 * Program the plane addresses for one src/dst buffer pair and kick the VEU:
 * destination Y/C addresses, source Y/C addresses, then VEU_STR to start and
 * VEU_EIER to unmask the completion interrupt.
 */
static void sh_veu_process(struct sh_veu_dev *veu,
			   struct vb2_buffer *src_buf,
			   struct vb2_buffer *dst_buf)
{
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);

	/* Destination Y/RGB plane; C plane address is 0 if the format has none */
	sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
	sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
			 addr + veu->vfmt_out.offset_c : 0);
	dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);

	/* Source Y/RGB plane, then its C plane */
	addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
	sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
			 addr + veu->vfmt_in.offset_c : 0);
	dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);

	sh_veu_reg_write(veu, VEU_STR, 1);
	sh_veu_reg_write(veu, VEU_EIER, 1);	/* enable interrupt in VEU */
}
  250. /**
  251. * sh_veu_device_run() - prepares and starts the device
  252. *
  253. * This will be called by the framework when it decides to schedule a particular
  254. * instance.
  255. */
  256. static void sh_veu_device_run(void *priv)
  257. {
  258. struct sh_veu_dev *veu = priv;
  259. struct vb2_buffer *src_buf, *dst_buf;
  260. src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
  261. dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
  262. if (src_buf && dst_buf)
  263. sh_veu_process(veu, src_buf, dst_buf);
  264. }
  265. /* ========== video ioctls ========== */
  266. static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  267. enum v4l2_buf_type type)
  268. {
  269. return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
  270. veu_file == veu->capture) ||
  271. (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
  272. veu_file == veu->output);
  273. }
  274. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  275. struct vb2_queue *dst_vq);
  276. /*
  277. * It is not unusual to have video nodes open()ed multiple times. While some
  278. * V4L2 operations are non-intrusive, like querying formats and various
  279. * parameters, others, like setting formats, starting and stopping streaming,
  280. * queuing and dequeuing buffers, directly affect hardware configuration and /
  281. * or execution. This function verifies availability of the requested interface
  282. * and, if available, reserves it for the requesting user.
  283. */
  284. static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  285. enum v4l2_buf_type type)
  286. {
  287. struct sh_veu_file **stream;
  288. switch (type) {
  289. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  290. stream = &veu->capture;
  291. break;
  292. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  293. stream = &veu->output;
  294. break;
  295. default:
  296. return -EINVAL;
  297. }
  298. if (*stream == veu_file)
  299. return 0;
  300. if (*stream)
  301. return -EBUSY;
  302. *stream = veu_file;
  303. return 0;
  304. }
/*
 * Lazily create the single m2m context for this device. Note: if a previous
 * attempt stored an ERR_PTR in m2m_ctx, the early return treats it as
 * "already initialised" and reports success.
 */
static int sh_veu_context_init(struct sh_veu_dev *veu)
{
	if (veu->m2m_ctx)
		return 0;

	veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
					 sh_veu_queue_init);

	return PTR_ERR_OR_ZERO(veu->m2m_ctx);
}
/* VIDIOC_QUERYCAP: report driver identity and mem2mem/streaming caps */
static int sh_veu_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
	strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}
  323. static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
  324. {
  325. if (f->index >= fmt_num)
  326. return -EINVAL;
  327. strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
  328. f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
  329. return 0;
  330. }
/* VIDIOC_ENUM_FMT for the CAPTURE queue: enumerate output-side formats */
static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
}
/* VIDIOC_ENUM_FMT for the OUTPUT queue: enumerate input-side formats */
static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
}
  341. static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
  342. enum v4l2_buf_type type)
  343. {
  344. switch (type) {
  345. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  346. return &veu->vfmt_out;
  347. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  348. return &veu->vfmt_in;
  349. default:
  350. return NULL;
  351. }
  352. }
  353. static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
  354. {
  355. struct v4l2_pix_format *pix = &f->fmt.pix;
  356. struct sh_veu_dev *veu = veu_file->veu_dev;
  357. struct sh_veu_vfmt *vfmt;
  358. vfmt = sh_veu_get_vfmt(veu, f->type);
  359. pix->width = vfmt->frame.width;
  360. pix->height = vfmt->frame.height;
  361. pix->field = V4L2_FIELD_NONE;
  362. pix->pixelformat = vfmt->fmt->fourcc;
  363. pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
  364. pix->bytesperline = vfmt->bytesperline;
  365. pix->sizeimage = vfmt->bytesperline * pix->height *
  366. vfmt->fmt->depth / vfmt->fmt->ydepth;
  367. pix->priv = 0;
  368. dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
  369. f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
  370. return 0;
  371. }
/* VIDIOC_G_FMT for the OUTPUT queue */
static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}
/* VIDIOC_G_FMT for the CAPTURE queue */
static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}
  382. static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
  383. {
  384. struct v4l2_pix_format *pix = &f->fmt.pix;
  385. unsigned int y_bytes_used;
  386. /*
  387. * V4L2 specification suggests, that the driver should correct the
  388. * format struct if any of the dimensions is unsupported
  389. */
  390. switch (pix->field) {
  391. default:
  392. case V4L2_FIELD_ANY:
  393. pix->field = V4L2_FIELD_NONE;
  394. /* fall through: continue handling V4L2_FIELD_NONE */
  395. case V4L2_FIELD_NONE:
  396. break;
  397. }
  398. v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
  399. &pix->height, MIN_H, MAX_H, 0, 0);
  400. y_bytes_used = (pix->width * fmt->ydepth) >> 3;
  401. if (pix->bytesperline < y_bytes_used)
  402. pix->bytesperline = y_bytes_used;
  403. pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
  404. pix->pixelformat = fmt->fourcc;
  405. pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
  406. pix->priv = 0;
  407. pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
  408. return 0;
  409. }
  410. static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
  411. {
  412. const int *fmt;
  413. int i, n, dflt;
  414. pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
  415. switch (f->type) {
  416. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  417. fmt = sh_veu_fmt_out;
  418. n = ARRAY_SIZE(sh_veu_fmt_out);
  419. dflt = DEFAULT_OUT_FMTIDX;
  420. break;
  421. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  422. default:
  423. fmt = sh_veu_fmt_in;
  424. n = ARRAY_SIZE(sh_veu_fmt_in);
  425. dflt = DEFAULT_IN_FMTIDX;
  426. break;
  427. }
  428. for (i = 0; i < n; i++)
  429. if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
  430. return &sh_veu_fmt[fmt[i]];
  431. return &sh_veu_fmt[dflt];
  432. }
  433. static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
  434. struct v4l2_format *f)
  435. {
  436. const struct sh_veu_format *fmt;
  437. fmt = sh_veu_find_fmt(f);
  438. if (!fmt)
  439. /* wrong buffer type */
  440. return -EINVAL;
  441. return sh_veu_try_fmt(f, fmt);
  442. }
  443. static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
  444. struct v4l2_format *f)
  445. {
  446. const struct sh_veu_format *fmt;
  447. fmt = sh_veu_find_fmt(f);
  448. if (!fmt)
  449. /* wrong buffer type */
  450. return -EINVAL;
  451. return sh_veu_try_fmt(f, fmt);
  452. }
/*
 * Compute the byte offsets of the Y/RGB and C planes within the buffer for
 * @vfmt, taking the (4-pixel-aligned) left/top crop position into account.
 *
 * NOTE(review): the byte offset is computed with the *output* format's depth
 * and stride even when @vfmt is the input format - presumably intentional
 * (positioning within the output frame); confirm against the VEU datasheet.
 */
static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
{
	/* dst_left and dst_top validity will be verified in CROP / COMPOSE */
	unsigned int left = vfmt->frame.left & ~0x03;
	unsigned int top = vfmt->frame.top;
	dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) +
		top * veu->vfmt_out.bytesperline;
	unsigned int y_line;

	vfmt->offset_y = offset;

	switch (vfmt->fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* C plane follows the (16-byte line aligned) Y plane */
		y_line = ALIGN(vfmt->frame.width, 16);
		vfmt->offset_c = offset + y_line * vfmt->frame.height;
		break;
	case V4L2_PIX_FMT_RGB332:
	case V4L2_PIX_FMT_RGB444:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_BGR666:
	case V4L2_PIX_FMT_RGB24:
		/* Packed RGB: no separate C plane */
		vfmt->offset_c = 0;
		break;
	default:
		BUG();
	}
}
/*
 * Common S_FMT helper: store the (already try_fmt()-validated) format in the
 * per-queue state and mark the hardware as needing reconfiguration. Fails
 * with -EBUSY while buffers are allocated on the queue.
 */
static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;
	struct vb2_queue *vq;
	int ret = sh_veu_context_init(veu);

	if (ret < 0)
		return ret;

	vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	vfmt = sh_veu_get_vfmt(veu, f->type);
	/* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */

	vfmt->fmt = sh_veu_find_fmt(f);
	/* vfmt->fmt != NULL following the same argument as above */
	vfmt->frame.width = pix->width;
	vfmt->frame.height = pix->height;
	vfmt->bytesperline = pix->bytesperline;

	sh_veu_colour_offset(veu, vfmt);

	/*
	 * We could also verify and require configuration only if any parameters
	 * actually have changed, but it is unlikely, that the user requests the
	 * same configuration several times without closing the device.
	 */
	veu_file->cfg_needed = true;

	dev_dbg(veu->dev,
		"Setting format for type %d, wxh: %dx%d, fmt: %x\n",
		f->type, pix->width, pix->height, vfmt->fmt->fourcc);

	return 0;
}
  515. static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
  516. struct v4l2_format *f)
  517. {
  518. int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
  519. if (ret)
  520. return ret;
  521. return sh_veu_s_fmt(priv, f);
  522. }
  523. static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
  524. struct v4l2_format *f)
  525. {
  526. int ret = sh_veu_try_fmt_vid_out(file, priv, f);
  527. if (ret)
  528. return ret;
  529. return sh_veu_s_fmt(priv, f);
  530. }
  531. static int sh_veu_reqbufs(struct file *file, void *priv,
  532. struct v4l2_requestbuffers *reqbufs)
  533. {
  534. struct sh_veu_file *veu_file = priv;
  535. struct sh_veu_dev *veu = veu_file->veu_dev;
  536. int ret = sh_veu_context_init(veu);
  537. if (ret < 0)
  538. return ret;
  539. ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
  540. if (ret < 0)
  541. return ret;
  542. return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
  543. }
  544. static int sh_veu_querybuf(struct file *file, void *priv,
  545. struct v4l2_buffer *buf)
  546. {
  547. struct sh_veu_file *veu_file = priv;
  548. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  549. return -EBUSY;
  550. return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
  551. }
  552. static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
  553. {
  554. struct sh_veu_file *veu_file = priv;
  555. dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
  556. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  557. return -EBUSY;
  558. return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
  559. }
  560. static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
  561. {
  562. struct sh_veu_file *veu_file = priv;
  563. dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
  564. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  565. return -EBUSY;
  566. return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
  567. }
  568. static void sh_veu_calc_scale(struct sh_veu_dev *veu,
  569. int size_in, int size_out, int crop_out,
  570. u32 *mant, u32 *frac, u32 *rep)
  571. {
  572. u32 fixpoint;
  573. /* calculate FRAC and MANT */
  574. *rep = *mant = *frac = 0;
  575. if (size_in == size_out) {
  576. if (crop_out != size_out)
  577. *mant = 1; /* needed for cropping */
  578. return;
  579. }
  580. /* VEU2H special upscale */
  581. if (veu->is_2h && size_out > size_in) {
  582. u32 fixpoint = (4096 * size_in) / size_out;
  583. *mant = fixpoint / 4096;
  584. *frac = (fixpoint - (*mant * 4096)) & ~0x07;
  585. switch (*frac) {
  586. case 0x800:
  587. *rep = 1;
  588. break;
  589. case 0x400:
  590. *rep = 3;
  591. break;
  592. case 0x200:
  593. *rep = 7;
  594. break;
  595. }
  596. if (*rep)
  597. return;
  598. }
  599. fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
  600. *mant = fixpoint / 4096;
  601. *frac = fixpoint - (*mant * 4096);
  602. if (*frac & 0x07) {
  603. /*
  604. * FIXME: do we really have to round down twice in the
  605. * up-scaling case?
  606. */
  607. *frac &= ~0x07;
  608. if (size_out > size_in)
  609. *frac -= 8; /* round down if scaling up */
  610. else
  611. *frac += 8; /* round up if scaling down */
  612. }
  613. }
/*
 * Program vertical scaling: the scale factor occupies the high 16 bits of
 * VEU_RFCR, (rep | crop) the high 16 bits of VEU_RFSR. Returns the resulting
 * source extent, aligned to 4.
 */
static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
		(((mant << 12) | frac) << 16);
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
		(((rep << 12) | crop_out) << 16);
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}
/*
 * Program horizontal scaling: the scale factor occupies the low 16 bits of
 * VEU_RFCR, (rep | crop) the low 16 bits of VEU_RFSR. Returns the resulting
 * source extent, aligned to 4.
 */
static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
		(mant << 12) | frac;
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
		(rep << 12) | crop_out;
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}
/*
 * Program the VEU for the currently set input and output formats: reset the
 * IP, set up scaling, strides, swap and transform control, and - on the
 * VEU2H variant - load the colour conversion matrix and offset.
 */
static void sh_veu_configure(struct sh_veu_dev *veu)
{
	u32 src_width, src_stride, src_height;
	u32 dst_width, dst_stride, dst_height;
	u32 real_w, real_h;

	/* reset VEU */
	sh_veu_reg_write(veu, VEU_BSRR, 0x100);

	src_width = veu->vfmt_in.frame.width;
	src_height = veu->vfmt_in.frame.height;
	src_stride = ALIGN(veu->vfmt_in.frame.width, 16);

	dst_width = real_w = veu->vfmt_out.frame.width;
	dst_height = real_h = veu->vfmt_out.frame.height;
	/* Datasheet is unclear - whether it's always number of bytes or not */
	dst_stride = veu->vfmt_out.bytesperline;

	/*
	 * So far real_w == dst_width && real_h == dst_height, but it wasn't
	 * necessarily the case in the original vidix driver, so, it may change
	 * here in the future too.
	 */
	src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
	src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);

	sh_veu_reg_write(veu, VEU_SWR, src_stride);
	sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
	sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */

	sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
	sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */

	sh_veu_reg_write(veu, VEU_SWPR, 0x67);
	sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);

	if (veu->is_2h) {
		/* VEU2H-only colour conversion matrix and offset */
		sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
		sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR02, 0x0000);

		sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
		sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);

		sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
		sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR22, 0x1023);

		sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
	}
}
  685. static int sh_veu_streamon(struct file *file, void *priv,
  686. enum v4l2_buf_type type)
  687. {
  688. struct sh_veu_file *veu_file = priv;
  689. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
  690. return -EBUSY;
  691. if (veu_file->cfg_needed) {
  692. struct sh_veu_dev *veu = veu_file->veu_dev;
  693. veu_file->cfg_needed = false;
  694. sh_veu_configure(veu_file->veu_dev);
  695. veu->xaction = 0;
  696. veu->aborting = false;
  697. }
  698. return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
  699. }
  700. static int sh_veu_streamoff(struct file *file, void *priv,
  701. enum v4l2_buf_type type)
  702. {
  703. struct sh_veu_file *veu_file = priv;
  704. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
  705. return -EBUSY;
  706. return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
  707. }
/* V4L2 ioctl dispatch table: single-plane CAPTURE + OUTPUT (mem2mem) */
static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
	.vidioc_querycap	= sh_veu_querycap,
	.vidioc_enum_fmt_vid_cap	= sh_veu_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= sh_veu_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= sh_veu_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= sh_veu_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_out	= sh_veu_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out	= sh_veu_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out	= sh_veu_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out	= sh_veu_s_fmt_vid_out,
	.vidioc_reqbufs		= sh_veu_reqbufs,
	.vidioc_querybuf	= sh_veu_querybuf,
	.vidioc_qbuf		= sh_veu_qbuf,
	.vidioc_dqbuf		= sh_veu_dqbuf,
	.vidioc_streamon	= sh_veu_streamon,
	.vidioc_streamoff	= sh_veu_streamoff,
};
/* ========== Queue operations ========== */

/*
 * vb2 .queue_setup: decide buffer count and per-plane size. With a non-NULL
 * @f (CREATE_BUFS path) the user-supplied format is validated against
 * try_fmt; otherwise the currently configured format is used.
 */
static int sh_veu_queue_setup(struct vb2_queue *vq,
			      const struct v4l2_format *f,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], void *alloc_ctxs[])
{
	struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
	struct sh_veu_vfmt *vfmt;
	unsigned int size, count = *nbuffers;

	if (f) {
		const struct v4l2_pix_format *pix = &f->fmt.pix;
		const struct sh_veu_format *fmt = sh_veu_find_fmt(f);
		struct v4l2_format ftmp = *f;

		if (fmt->fourcc != pix->pixelformat)
			return -EINVAL;

		/* Reject geometry that try_fmt would have to adjust */
		sh_veu_try_fmt(&ftmp, fmt);
		if (ftmp.fmt.pix.width != pix->width ||
		    ftmp.fmt.pix.height != pix->height)
			return -EINVAL;

		size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
			pix->width * pix->height * fmt->depth / fmt->ydepth;
	} else {
		vfmt = sh_veu_get_vfmt(veu, vq->type);
		size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
	}

	/* mem2mem needs at least two buffers per queue */
	if (count < 2)
		*nbuffers = count = 2;

	/* Cap the total allocation at VIDEO_MEM_LIMIT */
	if (size * count > VIDEO_MEM_LIMIT) {
		count = VIDEO_MEM_LIMIT / size;
		*nbuffers = count;
	}

	*nplanes = 1;
	sizes[0] = size;
	alloc_ctxs[0] = veu->alloc_ctx;

	dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);

	return 0;
}
  762. static int sh_veu_buf_prepare(struct vb2_buffer *vb)
  763. {
  764. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  765. struct sh_veu_vfmt *vfmt;
  766. unsigned int sizeimage;
  767. vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
  768. sizeimage = vfmt->bytesperline * vfmt->frame.height *
  769. vfmt->fmt->depth / vfmt->fmt->ydepth;
  770. if (vb2_plane_size(vb, 0) < sizeimage) {
  771. dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
  772. __func__, vb2_plane_size(vb, 0), sizeimage);
  773. return -EINVAL;
  774. }
  775. vb2_set_plane_payload(vb, 0, sizeimage);
  776. return 0;
  777. }
  778. static void sh_veu_buf_queue(struct vb2_buffer *vb)
  779. {
  780. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  781. dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type);
  782. v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
  783. }
/* vb2 .wait_prepare: drop the driver lock while vb2 sleeps for a buffer. */
static void sh_veu_wait_prepare(struct vb2_queue *q)
{
	sh_veu_unlock(vb2_get_drv_priv(q));
}
/* vb2 .wait_finish: re-take the driver lock after the blocking wait. */
static void sh_veu_wait_finish(struct vb2_queue *q)
{
	sh_veu_lock(vb2_get_drv_priv(q));
}
/* videobuf2 queue operations, shared by the OUTPUT and CAPTURE queues */
static const struct vb2_ops sh_veu_qops = {
	.queue_setup	= sh_veu_queue_setup,
	.buf_prepare	= sh_veu_buf_prepare,
	.buf_queue	= sh_veu_buf_queue,
	.wait_prepare	= sh_veu_wait_prepare,
	.wait_finish	= sh_veu_wait_finish,
};
  799. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  800. struct vb2_queue *dst_vq)
  801. {
  802. int ret;
  803. memset(src_vq, 0, sizeof(*src_vq));
  804. src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
  805. src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  806. src_vq->drv_priv = priv;
  807. src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  808. src_vq->ops = &sh_veu_qops;
  809. src_vq->mem_ops = &vb2_dma_contig_memops;
  810. ret = vb2_queue_init(src_vq);
  811. if (ret < 0)
  812. return ret;
  813. memset(dst_vq, 0, sizeof(*dst_vq));
  814. dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  815. dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  816. dst_vq->drv_priv = priv;
  817. dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  818. dst_vq->ops = &sh_veu_qops;
  819. dst_vq->mem_ops = &vb2_dma_contig_memops;
  820. return vb2_queue_init(dst_vq);
  821. }
  822. /* ========== File operations ========== */
  823. static int sh_veu_open(struct file *file)
  824. {
  825. struct sh_veu_dev *veu = video_drvdata(file);
  826. struct sh_veu_file *veu_file;
  827. veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
  828. if (!veu_file)
  829. return -ENOMEM;
  830. veu_file->veu_dev = veu;
  831. veu_file->cfg_needed = true;
  832. file->private_data = veu_file;
  833. pm_runtime_get_sync(veu->dev);
  834. dev_dbg(veu->dev, "Created instance %p\n", veu_file);
  835. return 0;
  836. }
/*
 * Release a per-open context. Ordering matters: the vb2 queue owned by
 * this handle is released first, the shared m2m context only once both
 * the capture and output owners are gone.
 */
static int sh_veu_release(struct file *file)
{
	struct sh_veu_dev *veu = video_drvdata(file);
	struct sh_veu_file *veu_file = file->private_data;

	dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);

	/* give up ownership of the queue(s) this handle was streaming */
	if (veu_file == veu->capture) {
		veu->capture = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
	}

	if (veu_file == veu->output) {
		veu->output = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
	}

	/* last owner gone: tear down the shared mem2mem context */
	if (!veu->output && !veu->capture && veu->m2m_ctx) {
		v4l2_m2m_ctx_release(veu->m2m_ctx);
		veu->m2m_ctx = NULL;
	}

	/* balances pm_runtime_get_sync() in sh_veu_open() */
	pm_runtime_put(veu->dev);
	kfree(veu_file);

	return 0;
}
  858. static unsigned int sh_veu_poll(struct file *file,
  859. struct poll_table_struct *wait)
  860. {
  861. struct sh_veu_file *veu_file = file->private_data;
  862. return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
  863. }
  864. static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
  865. {
  866. struct sh_veu_file *veu_file = file->private_data;
  867. return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
  868. }
/* V4L2 file operations; ioctls are serialized via video_ioctl2 */
static const struct v4l2_file_operations sh_veu_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_veu_open,
	.release	= sh_veu_release,
	.poll		= sh_veu_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= sh_veu_mmap,
};
/*
 * Template video_device, copied into veu->vdev at probe time.
 * Embedded in sh_veu_dev, so no release callback is needed.
 */
static const struct video_device sh_veu_videodev = {
	.name		= "sh-veu",
	.fops		= &sh_veu_fops,
	.ioctl_ops	= &sh_veu_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
};
/* mem2mem framework callbacks for running and aborting conversion jobs */
static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
	.device_run	= sh_veu_device_run,
	.job_abort	= sh_veu_job_abort,
};
  889. static irqreturn_t sh_veu_bh(int irq, void *dev_id)
  890. {
  891. struct sh_veu_dev *veu = dev_id;
  892. if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
  893. v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
  894. veu->xaction = 0;
  895. } else {
  896. sh_veu_device_run(veu);
  897. }
  898. return IRQ_HANDLED;
  899. }
/*
 * Hard IRQ handler. The register-write sequence (mask interrupts, halt
 * the engine, then ack the event) is hardware-mandated; do not reorder.
 * Retires the current src/dst buffer pair and wakes sh_veu_bh() to
 * decide whether the mem2mem job is complete.
 */
static irqreturn_t sh_veu_isr(int irq, void *dev_id)
{
	struct sh_veu_dev *veu = dev_id;
	struct vb2_buffer *dst;
	struct vb2_buffer *src;
	u32 status = sh_veu_reg_read(veu, VEU_EVTR);

	/* bundle read mode not used */
	if (!(status & 1))
		return IRQ_NONE;

	/* disable interrupt in VEU */
	sh_veu_reg_write(veu, VEU_EIER, 0);
	/* halt operation */
	sh_veu_reg_write(veu, VEU_STR, 0);
	/* ack int, write 0 to clear bits */
	sh_veu_reg_write(veu, VEU_EVTR, status & ~1);

	/* conversion completed */
	dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
	src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
	/* spurious completion with no queued buffers: nothing to report */
	if (!src || !dst)
		return IRQ_NONE;

	spin_lock(&veu->lock);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	spin_unlock(&veu->lock);

	veu->xaction++;

	/* run sh_veu_bh() in thread context to continue or finish the job */
	return IRQ_WAKE_THREAD;
}
  927. static int sh_veu_probe(struct platform_device *pdev)
  928. {
  929. struct sh_veu_dev *veu;
  930. struct resource *reg_res;
  931. struct video_device *vdev;
  932. int irq, ret;
  933. reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  934. irq = platform_get_irq(pdev, 0);
  935. if (!reg_res || irq <= 0) {
  936. dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
  937. return -ENODEV;
  938. }
  939. veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
  940. if (!veu)
  941. return -ENOMEM;
  942. veu->is_2h = resource_size(reg_res) == 0x22c;
  943. veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
  944. if (IS_ERR(veu->base))
  945. return PTR_ERR(veu->base);
  946. ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
  947. 0, "veu", veu);
  948. if (ret < 0)
  949. return ret;
  950. ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
  951. if (ret < 0) {
  952. dev_err(&pdev->dev, "Error registering v4l2 device\n");
  953. return ret;
  954. }
  955. vdev = &veu->vdev;
  956. veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
  957. if (IS_ERR(veu->alloc_ctx)) {
  958. ret = PTR_ERR(veu->alloc_ctx);
  959. goto einitctx;
  960. }
  961. *vdev = sh_veu_videodev;
  962. spin_lock_init(&veu->lock);
  963. mutex_init(&veu->fop_lock);
  964. vdev->lock = &veu->fop_lock;
  965. video_set_drvdata(vdev, veu);
  966. veu->dev = &pdev->dev;
  967. veu->vfmt_out = DEFAULT_OUT_VFMT;
  968. veu->vfmt_in = DEFAULT_IN_VFMT;
  969. veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
  970. if (IS_ERR(veu->m2m_dev)) {
  971. ret = PTR_ERR(veu->m2m_dev);
  972. v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
  973. goto em2minit;
  974. }
  975. pm_runtime_enable(&pdev->dev);
  976. pm_runtime_resume(&pdev->dev);
  977. ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
  978. pm_runtime_suspend(&pdev->dev);
  979. if (ret < 0)
  980. goto evidreg;
  981. return ret;
  982. evidreg:
  983. pm_runtime_disable(&pdev->dev);
  984. v4l2_m2m_release(veu->m2m_dev);
  985. em2minit:
  986. vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
  987. einitctx:
  988. v4l2_device_unregister(&veu->v4l2_dev);
  989. return ret;
  990. }
/*
 * Remove: unwind probe in reverse order. platform drvdata points at the
 * embedded v4l2_device (set by v4l2_device_register()), from which the
 * driver structure is recovered.
 */
static int sh_veu_remove(struct platform_device *pdev)
{
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct sh_veu_dev *veu = container_of(v4l2_dev,
					      struct sh_veu_dev, v4l2_dev);

	video_unregister_device(&veu->vdev);
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
	vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
	v4l2_device_unregister(&veu->v4l2_dev);

	return 0;
}
/*
 * No .probe here: module_platform_driver_probe() registers the driver
 * with sh_veu_probe() bound at init time (probe code may be __init).
 */
static struct platform_driver __refdata sh_veu_pdrv = {
	.remove		= sh_veu_remove,
	.driver		= {
		.name	= "sh_veu",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);

MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");