mixer_video.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126
  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #define pr_fmt(fmt) "s5p-tv (mixer): " fmt
  14. #include "mixer.h"
  15. #include <media/v4l2-ioctl.h>
  16. #include <linux/videodev2.h>
  17. #include <linux/mm.h>
  18. #include <linux/module.h>
  19. #include <linux/timer.h>
  20. #include <media/videobuf2-dma-contig.h>
  21. static int find_reg_callback(struct device *dev, void *p)
  22. {
  23. struct v4l2_subdev **sd = p;
  24. *sd = dev_get_drvdata(dev);
  25. /* non-zero value stops iteration */
  26. return 1;
  27. }
  28. static struct v4l2_subdev *find_and_register_subdev(
  29. struct mxr_device *mdev, char *module_name)
  30. {
  31. struct device_driver *drv;
  32. struct v4l2_subdev *sd = NULL;
  33. int ret;
  34. /* TODO: add waiting until probe is finished */
  35. drv = driver_find(module_name, &platform_bus_type);
  36. if (!drv) {
  37. mxr_warn(mdev, "module %s is missing\n", module_name);
  38. return NULL;
  39. }
  40. /* driver refcnt is increased, it is safe to iterate over devices */
  41. ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
  42. /* ret == 0 means that find_reg_callback was never executed */
  43. if (sd == NULL) {
  44. mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
  45. goto done;
  46. }
  47. /* v4l2_device_register_subdev detects if sd is NULL */
  48. ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
  49. if (ret) {
  50. mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
  51. sd = NULL;
  52. }
  53. done:
  54. return sd;
  55. }
  56. int __devinit mxr_acquire_video(struct mxr_device *mdev,
  57. struct mxr_output_conf *output_conf, int output_count)
  58. {
  59. struct device *dev = mdev->dev;
  60. struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
  61. int i;
  62. int ret = 0;
  63. struct v4l2_subdev *sd;
  64. strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
  65. /* prepare context for V4L2 device */
  66. ret = v4l2_device_register(dev, v4l2_dev);
  67. if (ret) {
  68. mxr_err(mdev, "could not register v4l2 device.\n");
  69. goto fail;
  70. }
  71. mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
  72. if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
  73. mxr_err(mdev, "could not acquire vb2 allocator\n");
  74. goto fail_v4l2_dev;
  75. }
  76. /* registering outputs */
  77. mdev->output_cnt = 0;
  78. for (i = 0; i < output_count; ++i) {
  79. struct mxr_output_conf *conf = &output_conf[i];
  80. struct mxr_output *out;
  81. sd = find_and_register_subdev(mdev, conf->module_name);
  82. /* trying to register next output */
  83. if (sd == NULL)
  84. continue;
  85. out = kzalloc(sizeof *out, GFP_KERNEL);
  86. if (out == NULL) {
  87. mxr_err(mdev, "no memory for '%s'\n",
  88. conf->output_name);
  89. ret = -ENOMEM;
  90. /* registered subdevs are removed in fail_v4l2_dev */
  91. goto fail_output;
  92. }
  93. strlcpy(out->name, conf->output_name, sizeof(out->name));
  94. out->sd = sd;
  95. out->cookie = conf->cookie;
  96. mdev->output[mdev->output_cnt++] = out;
  97. mxr_info(mdev, "added output '%s' from module '%s'\n",
  98. conf->output_name, conf->module_name);
  99. /* checking if maximal number of outputs is reached */
  100. if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
  101. break;
  102. }
  103. if (mdev->output_cnt == 0) {
  104. mxr_err(mdev, "failed to register any output\n");
  105. ret = -ENODEV;
  106. /* skipping fail_output because there is nothing to free */
  107. goto fail_vb2_allocator;
  108. }
  109. return 0;
  110. fail_output:
  111. /* kfree is NULL-safe */
  112. for (i = 0; i < mdev->output_cnt; ++i)
  113. kfree(mdev->output[i]);
  114. memset(mdev->output, 0, sizeof mdev->output);
  115. fail_vb2_allocator:
  116. /* freeing allocator context */
  117. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  118. fail_v4l2_dev:
  119. /* NOTE: automatically unregister all subdevs */
  120. v4l2_device_unregister(v4l2_dev);
  121. fail:
  122. return ret;
  123. }
  124. void mxr_release_video(struct mxr_device *mdev)
  125. {
  126. int i;
  127. /* kfree is NULL-safe */
  128. for (i = 0; i < mdev->output_cnt; ++i)
  129. kfree(mdev->output[i]);
  130. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  131. v4l2_device_unregister(&mdev->v4l2_dev);
  132. }
  133. static int mxr_querycap(struct file *file, void *priv,
  134. struct v4l2_capability *cap)
  135. {
  136. struct mxr_layer *layer = video_drvdata(file);
  137. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  138. strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
  139. strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
  140. sprintf(cap->bus_info, "%d", layer->idx);
  141. cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  142. cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
  143. return 0;
  144. }
  145. static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
  146. {
  147. mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
  148. geo->src.full_width, geo->src.full_height);
  149. mxr_dbg(mdev, "src.size = (%u, %u)\n",
  150. geo->src.width, geo->src.height);
  151. mxr_dbg(mdev, "src.offset = (%u, %u)\n",
  152. geo->src.x_offset, geo->src.y_offset);
  153. mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
  154. geo->dst.full_width, geo->dst.full_height);
  155. mxr_dbg(mdev, "dst.size = (%u, %u)\n",
  156. geo->dst.width, geo->dst.height);
  157. mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
  158. geo->dst.x_offset, geo->dst.y_offset);
  159. mxr_dbg(mdev, "ratio = (%u, %u)\n",
  160. geo->x_ratio, geo->y_ratio);
  161. }
  162. static void mxr_layer_default_geo(struct mxr_layer *layer)
  163. {
  164. struct mxr_device *mdev = layer->mdev;
  165. struct v4l2_mbus_framefmt mbus_fmt;
  166. memset(&layer->geo, 0, sizeof layer->geo);
  167. mxr_get_mbus_fmt(mdev, &mbus_fmt);
  168. layer->geo.dst.full_width = mbus_fmt.width;
  169. layer->geo.dst.full_height = mbus_fmt.height;
  170. layer->geo.dst.width = layer->geo.dst.full_width;
  171. layer->geo.dst.height = layer->geo.dst.full_height;
  172. layer->geo.dst.field = mbus_fmt.field;
  173. layer->geo.src.full_width = mbus_fmt.width;
  174. layer->geo.src.full_height = mbus_fmt.height;
  175. layer->geo.src.width = layer->geo.src.full_width;
  176. layer->geo.src.height = layer->geo.src.full_height;
  177. mxr_geometry_dump(mdev, &layer->geo);
  178. layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
  179. mxr_geometry_dump(mdev, &layer->geo);
  180. }
  181. static void mxr_layer_update_output(struct mxr_layer *layer)
  182. {
  183. struct mxr_device *mdev = layer->mdev;
  184. struct v4l2_mbus_framefmt mbus_fmt;
  185. mxr_get_mbus_fmt(mdev, &mbus_fmt);
  186. /* checking if update is needed */
  187. if (layer->geo.dst.full_width == mbus_fmt.width &&
  188. layer->geo.dst.full_height == mbus_fmt.width)
  189. return;
  190. layer->geo.dst.full_width = mbus_fmt.width;
  191. layer->geo.dst.full_height = mbus_fmt.height;
  192. layer->geo.dst.field = mbus_fmt.field;
  193. layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
  194. mxr_geometry_dump(mdev, &layer->geo);
  195. }
  196. static const struct mxr_format *find_format_by_fourcc(
  197. struct mxr_layer *layer, unsigned long fourcc);
  198. static const struct mxr_format *find_format_by_index(
  199. struct mxr_layer *layer, unsigned long index);
  200. static int mxr_enum_fmt(struct file *file, void *priv,
  201. struct v4l2_fmtdesc *f)
  202. {
  203. struct mxr_layer *layer = video_drvdata(file);
  204. struct mxr_device *mdev = layer->mdev;
  205. const struct mxr_format *fmt;
  206. mxr_dbg(mdev, "%s\n", __func__);
  207. fmt = find_format_by_index(layer, f->index);
  208. if (fmt == NULL)
  209. return -EINVAL;
  210. strlcpy(f->description, fmt->name, sizeof(f->description));
  211. f->pixelformat = fmt->fourcc;
  212. return 0;
  213. }
  214. static unsigned int divup(unsigned int divident, unsigned int divisor)
  215. {
  216. return (divident + divisor - 1) / divisor;
  217. }
  218. unsigned long mxr_get_plane_size(const struct mxr_block *blk,
  219. unsigned int width, unsigned int height)
  220. {
  221. unsigned int bl_width = divup(width, blk->width);
  222. unsigned int bl_height = divup(height, blk->height);
  223. return bl_width * bl_height * blk->size;
  224. }
  225. static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
  226. const struct mxr_format *fmt, u32 width, u32 height)
  227. {
  228. int i;
  229. /* checking if nothing to fill */
  230. if (!planes)
  231. return;
  232. memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
  233. for (i = 0; i < fmt->num_planes; ++i) {
  234. struct v4l2_plane_pix_format *plane = planes
  235. + fmt->plane2subframe[i];
  236. const struct mxr_block *blk = &fmt->plane[i];
  237. u32 bl_width = divup(width, blk->width);
  238. u32 bl_height = divup(height, blk->height);
  239. u32 sizeimage = bl_width * bl_height * blk->size;
  240. u16 bytesperline = bl_width * blk->size / blk->height;
  241. plane->sizeimage += sizeimage;
  242. plane->bytesperline = max(plane->bytesperline, bytesperline);
  243. }
  244. }
  245. static int mxr_g_fmt(struct file *file, void *priv,
  246. struct v4l2_format *f)
  247. {
  248. struct mxr_layer *layer = video_drvdata(file);
  249. struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
  250. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  251. pix->width = layer->geo.src.full_width;
  252. pix->height = layer->geo.src.full_height;
  253. pix->field = V4L2_FIELD_NONE;
  254. pix->pixelformat = layer->fmt->fourcc;
  255. pix->colorspace = layer->fmt->colorspace;
  256. mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
  257. return 0;
  258. }
/* VIDIOC_S_FMT handler: select the pixel format and recompute the layer
 * geometry around the requested width/height.  The three fix_geometry()
 * passes are order-dependent: first grow the source to an accepted size,
 * then set cropping, then shrink the source to the smallest size the
 * hardware accepts.  Finally the resulting format is written back into
 * 'f' via mxr_g_fmt().  Returns -EINVAL for an unknown fourcc.
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;

	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results; mxr_g_fmt() cannot fail here */
	mxr_g_fmt(file, priv, f);

	return 0;
}
  298. static int mxr_g_selection(struct file *file, void *fh,
  299. struct v4l2_selection *s)
  300. {
  301. struct mxr_layer *layer = video_drvdata(file);
  302. struct mxr_geometry *geo = &layer->geo;
  303. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  304. if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
  305. s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
  306. return -EINVAL;
  307. switch (s->target) {
  308. case V4L2_SEL_TGT_CROP:
  309. s->r.left = geo->src.x_offset;
  310. s->r.top = geo->src.y_offset;
  311. s->r.width = geo->src.width;
  312. s->r.height = geo->src.height;
  313. break;
  314. case V4L2_SEL_TGT_CROP_DEFAULT:
  315. case V4L2_SEL_TGT_CROP_BOUNDS:
  316. s->r.left = 0;
  317. s->r.top = 0;
  318. s->r.width = geo->src.full_width;
  319. s->r.height = geo->src.full_height;
  320. break;
  321. case V4L2_SEL_TGT_COMPOSE:
  322. case V4L2_SEL_TGT_COMPOSE_PADDED:
  323. s->r.left = geo->dst.x_offset;
  324. s->r.top = geo->dst.y_offset;
  325. s->r.width = geo->dst.width;
  326. s->r.height = geo->dst.height;
  327. break;
  328. case V4L2_SEL_TGT_COMPOSE_DEFAULT:
  329. case V4L2_SEL_TGT_COMPOSE_BOUNDS:
  330. s->r.left = 0;
  331. s->r.top = 0;
  332. s->r.width = geo->dst.full_width;
  333. s->r.height = geo->dst.full_height;
  334. break;
  335. default:
  336. return -EINVAL;
  337. }
  338. return 0;
  339. }
  340. /* returns 1 if rectangle 'a' is inside 'b' */
  341. static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
  342. {
  343. if (a->left < b->left)
  344. return 0;
  345. if (a->top < b->top)
  346. return 0;
  347. if (a->left + a->width > b->left + b->width)
  348. return 0;
  349. if (a->top + a->height > b->top + b->height)
  350. return 0;
  351. return 1;
  352. }
/* VIDIOC_S_SELECTION handler: apply a crop (source) or compose
 * (destination) rectangle.  Read-only targets (*_DEFAULT, *_BOUNDS) are
 * accepted but only report the current bounds back.  For writable
 * targets the old geometry is saved so it can be restored if the
 * hardware-adjusted result violates the V4L2_SEL_FLAG_LE/GE constraints
 * requested by the caller; in that case -ERANGE is returned.
 */
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;	/* stays NULL for read-only targets */
	enum mxr_geometry_stage stage;	/* only set when target is set */
	struct mxr_geometry tmp;	/* backup for the failure path */
	struct v4l2_rect res;

	memset(&res, 0, sizeof res);

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}

	/* apply change and update geometry if needed */
	if (target) {
		/* backup current geometry if setup fails */
		memcpy(&tmp, geo, sizeof tmp);

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve update selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof tmp);
	return -ERANGE;
}
  424. static int mxr_enum_dv_presets(struct file *file, void *fh,
  425. struct v4l2_dv_enum_preset *preset)
  426. {
  427. struct mxr_layer *layer = video_drvdata(file);
  428. struct mxr_device *mdev = layer->mdev;
  429. int ret;
  430. /* lock protects from changing sd_out */
  431. mutex_lock(&mdev->mutex);
  432. ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
  433. mutex_unlock(&mdev->mutex);
  434. return ret ? -EINVAL : 0;
  435. }
  436. static int mxr_s_dv_preset(struct file *file, void *fh,
  437. struct v4l2_dv_preset *preset)
  438. {
  439. struct mxr_layer *layer = video_drvdata(file);
  440. struct mxr_device *mdev = layer->mdev;
  441. int ret;
  442. /* lock protects from changing sd_out */
  443. mutex_lock(&mdev->mutex);
  444. /* preset change cannot be done while there is an entity
  445. * dependant on output configuration
  446. */
  447. if (mdev->n_output > 0) {
  448. mutex_unlock(&mdev->mutex);
  449. return -EBUSY;
  450. }
  451. ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);
  452. mutex_unlock(&mdev->mutex);
  453. mxr_layer_update_output(layer);
  454. /* any failure should return EINVAL according to V4L2 doc */
  455. return ret ? -EINVAL : 0;
  456. }
  457. static int mxr_g_dv_preset(struct file *file, void *fh,
  458. struct v4l2_dv_preset *preset)
  459. {
  460. struct mxr_layer *layer = video_drvdata(file);
  461. struct mxr_device *mdev = layer->mdev;
  462. int ret;
  463. /* lock protects from changing sd_out */
  464. mutex_lock(&mdev->mutex);
  465. ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
  466. mutex_unlock(&mdev->mutex);
  467. return ret ? -EINVAL : 0;
  468. }
  469. static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
  470. {
  471. struct mxr_layer *layer = video_drvdata(file);
  472. struct mxr_device *mdev = layer->mdev;
  473. int ret;
  474. /* lock protects from changing sd_out */
  475. mutex_lock(&mdev->mutex);
  476. /* standard change cannot be done while there is an entity
  477. * dependant on output configuration
  478. */
  479. if (mdev->n_output > 0) {
  480. mutex_unlock(&mdev->mutex);
  481. return -EBUSY;
  482. }
  483. ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);
  484. mutex_unlock(&mdev->mutex);
  485. mxr_layer_update_output(layer);
  486. return ret ? -EINVAL : 0;
  487. }
  488. static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
  489. {
  490. struct mxr_layer *layer = video_drvdata(file);
  491. struct mxr_device *mdev = layer->mdev;
  492. int ret;
  493. /* lock protects from changing sd_out */
  494. mutex_lock(&mdev->mutex);
  495. ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
  496. mutex_unlock(&mdev->mutex);
  497. return ret ? -EINVAL : 0;
  498. }
  499. static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
  500. {
  501. struct mxr_layer *layer = video_drvdata(file);
  502. struct mxr_device *mdev = layer->mdev;
  503. struct mxr_output *out;
  504. struct v4l2_subdev *sd;
  505. if (a->index >= mdev->output_cnt)
  506. return -EINVAL;
  507. out = mdev->output[a->index];
  508. BUG_ON(out == NULL);
  509. sd = out->sd;
  510. strlcpy(a->name, out->name, sizeof(a->name));
  511. /* try to obtain supported tv norms */
  512. v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
  513. a->capabilities = 0;
  514. if (sd->ops->video && sd->ops->video->s_dv_preset)
  515. a->capabilities |= V4L2_OUT_CAP_PRESETS;
  516. if (sd->ops->video && sd->ops->video->s_std_output)
  517. a->capabilities |= V4L2_OUT_CAP_STD;
  518. a->type = V4L2_OUTPUT_TYPE_ANALOG;
  519. return 0;
  520. }
  521. static int mxr_s_output(struct file *file, void *fh, unsigned int i)
  522. {
  523. struct video_device *vfd = video_devdata(file);
  524. struct mxr_layer *layer = video_drvdata(file);
  525. struct mxr_device *mdev = layer->mdev;
  526. if (i >= mdev->output_cnt || mdev->output[i] == NULL)
  527. return -EINVAL;
  528. mutex_lock(&mdev->mutex);
  529. if (mdev->n_output > 0) {
  530. mutex_unlock(&mdev->mutex);
  531. return -EBUSY;
  532. }
  533. mdev->current_output = i;
  534. vfd->tvnorms = 0;
  535. v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
  536. &vfd->tvnorms);
  537. mutex_unlock(&mdev->mutex);
  538. /* update layers geometry */
  539. mxr_layer_update_output(layer);
  540. mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
  541. return 0;
  542. }
  543. static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
  544. {
  545. struct mxr_layer *layer = video_drvdata(file);
  546. struct mxr_device *mdev = layer->mdev;
  547. mutex_lock(&mdev->mutex);
  548. *p = mdev->current_output;
  549. mutex_unlock(&mdev->mutex);
  550. return 0;
  551. }
  552. static int mxr_reqbufs(struct file *file, void *priv,
  553. struct v4l2_requestbuffers *p)
  554. {
  555. struct mxr_layer *layer = video_drvdata(file);
  556. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  557. return vb2_reqbufs(&layer->vb_queue, p);
  558. }
  559. static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
  560. {
  561. struct mxr_layer *layer = video_drvdata(file);
  562. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  563. return vb2_querybuf(&layer->vb_queue, p);
  564. }
  565. static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  566. {
  567. struct mxr_layer *layer = video_drvdata(file);
  568. mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
  569. return vb2_qbuf(&layer->vb_queue, p);
  570. }
  571. static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  572. {
  573. struct mxr_layer *layer = video_drvdata(file);
  574. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  575. return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
  576. }
  577. static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
  578. {
  579. struct mxr_layer *layer = video_drvdata(file);
  580. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  581. return vb2_streamon(&layer->vb_queue, i);
  582. }
  583. static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
  584. {
  585. struct mxr_layer *layer = video_drvdata(file);
  586. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  587. return vb2_streamoff(&layer->vb_queue, i);
  588. }
/* V4L2 ioctl dispatch table for the mixer video nodes; every entry is
 * implemented above in this file. */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};
/* File-open handler.  Creates a v4l2 file handle for every open; only
 * the first (singular) handle additionally powers the device up and
 * initializes the vb2 queue, default format and default geometry.
 * On error the goto chain unwinds in reverse acquisition order.
 * Returns 0 or a negative errno.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	/* layer mutex serializes open/release/mmap against each other */
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}
	/* leaving if layer is already initialized (not the first open) */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;
	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}
	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);

	return ret;
}
  662. static unsigned int
  663. mxr_video_poll(struct file *file, struct poll_table_struct *wait)
  664. {
  665. struct mxr_layer *layer = video_drvdata(file);
  666. unsigned int res;
  667. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  668. mutex_lock(&layer->mutex);
  669. res = vb2_poll(&layer->vb_queue, file, wait);
  670. mutex_unlock(&layer->mutex);
  671. return res;
  672. }
  673. static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
  674. {
  675. struct mxr_layer *layer = video_drvdata(file);
  676. int ret;
  677. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  678. if (mutex_lock_interruptible(&layer->mutex))
  679. return -ERESTARTSYS;
  680. ret = vb2_mmap(&layer->vb_queue, vma);
  681. mutex_unlock(&layer->mutex);
  682. return ret;
  683. }
  684. static int mxr_video_release(struct file *file)
  685. {
  686. struct mxr_layer *layer = video_drvdata(file);
  687. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  688. mutex_lock(&layer->mutex);
  689. if (v4l2_fh_is_singular_file(file)) {
  690. vb2_queue_release(&layer->vb_queue);
  691. mxr_power_put(layer->mdev);
  692. }
  693. v4l2_fh_release(file);
  694. mutex_unlock(&layer->mutex);
  695. return 0;
  696. }
/* File operations for the mixer video nodes; ioctls are routed through
 * video_ioctl2 to mxr_ioctl_ops. */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
/* vb2 .queue_setup callback: report the number of planes (subframes)
 * and the per-plane buffer sizes for the layer's current format at the
 * current source full-frame size.  Fails with -EINVAL when no format
 * has been configured yet.
 * NOTE(review): 'planes[3]' assumes fmt->num_subframes <= 3 — the
 * format tables are defined elsewhere; confirm the bound there.
 */
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		/* all planes share the dma-contig allocator context */
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	/* guarantee at least one buffer */
	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}
  731. static void buf_queue(struct vb2_buffer *vb)
  732. {
  733. struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
  734. struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
  735. struct mxr_device *mdev = layer->mdev;
  736. unsigned long flags;
  737. spin_lock_irqsave(&layer->enq_slock, flags);
  738. list_add_tail(&buffer->list, &layer->enq_list);
  739. spin_unlock_irqrestore(&layer->enq_slock, flags);
  740. mxr_dbg(mdev, "queuing buffer\n");
  741. }
  742. static void wait_lock(struct vb2_queue *vq)
  743. {
  744. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  745. mxr_dbg(layer->mdev, "%s\n", __func__);
  746. mutex_lock(&layer->mutex);
  747. }
  748. static void wait_unlock(struct vb2_queue *vq)
  749. {
  750. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  751. mxr_dbg(layer->mdev, "%s\n", __func__);
  752. mutex_unlock(&layer->mutex);
  753. }
/* vb2 .start_streaming callback: pin the current output configuration,
 * program the layer format, mark the layer streaming (under the enqueue
 * spinlock so the state change is atomic w.r.t. buffer handling), then
 * enable it in hardware and take a streamer reference.
 * Returns -EINVAL when no buffers were queued.
 */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}
/*
 * Watchdog timer callback armed by stop_streaming(): if the hardware did
 * not return the in-flight buffers within the timeout, force-complete
 * them with an error status so vb2_wait_for_all_buffers() can finish.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);
	/* if both pointers alias the same buffer, complete it only once */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
/*
 * vb2 stop_streaming callback: flush all still-enqueued buffers, wait
 * (bounded by a 1 s watchdog) for the in-flight buffers to complete,
 * then disable the layer in hardware and release the streamer and output
 * references taken in start_streaming().
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);
	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;
	/* complete every buffer still waiting on the enqueue list */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second for the last buffers to complete */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers reach the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);

	return 0;
}
/*
 * videobuf2 queue operations for a mixer layer.
 *
 * Note the deliberately crossed mapping: vb2 calls .wait_prepare before
 * sleeping (so the layer mutex must be released -> wait_unlock) and
 * .wait_finish after waking up (mutex re-taken -> wait_lock).
 */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
  841. /* FIXME: try to put this functions to mxr_base_layer_create */
  842. int mxr_base_layer_register(struct mxr_layer *layer)
  843. {
  844. struct mxr_device *mdev = layer->mdev;
  845. int ret;
  846. ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
  847. if (ret)
  848. mxr_err(mdev, "failed to register video device\n");
  849. else
  850. mxr_info(mdev, "registered layer %s as /dev/video%d\n",
  851. layer->vfd.name, layer->vfd.num);
  852. return ret;
  853. }
  854. void mxr_base_layer_unregister(struct mxr_layer *layer)
  855. {
  856. video_unregister_device(&layer->vfd);
  857. }
  858. void mxr_layer_release(struct mxr_layer *layer)
  859. {
  860. if (layer->ops.release)
  861. layer->ops.release(layer);
  862. }
  863. void mxr_base_layer_release(struct mxr_layer *layer)
  864. {
  865. kfree(layer);
  866. }
  867. static void mxr_vfd_release(struct video_device *vdev)
  868. {
  869. pr_info("video device release\n");
  870. }
/*
 * Allocate and initialise a generic mixer layer: locks, buffer list,
 * the embedded video_device and the vb2 queue template.
 *
 * @mdev: owning mixer device
 * @idx:  layer index within the mixer
 * @name: human-readable name copied into the video device
 * @ops:  layer-type specific operations (copied by value)
 *
 * Returns the new layer, or NULL on allocation failure. The caller is
 * expected to release it with mxr_base_layer_release().
 */
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	/* template for the video device node exposed to userspace */
	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.vfl_dir = VFL_DIR_TX,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	/* serialize file operations with the layer mutex */
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	/* vb2 queue: multiplanar output, MMAP/USERPTR, DMA-contig memory */
	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}
  911. static const struct mxr_format *find_format_by_fourcc(
  912. struct mxr_layer *layer, unsigned long fourcc)
  913. {
  914. int i;
  915. for (i = 0; i < layer->fmt_array_size; ++i)
  916. if (layer->fmt_array[i]->fourcc == fourcc)
  917. return layer->fmt_array[i];
  918. return NULL;
  919. }
  920. static const struct mxr_format *find_format_by_index(
  921. struct mxr_layer *layer, unsigned long index)
  922. {
  923. if (index >= layer->fmt_array_size)
  924. return NULL;
  925. return layer->fmt_array[index];
  926. }