mixer_video.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006
  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
  10. * by the Free Software Foundation. either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #include "mixer.h"
  14. #include <media/v4l2-ioctl.h>
  15. #include <linux/videodev2.h>
  16. #include <linux/mm.h>
  17. #include <linux/version.h>
  18. #include <linux/timer.h>
  19. #include <media/videobuf2-dma-contig.h>
  20. static int find_reg_callback(struct device *dev, void *p)
  21. {
  22. struct v4l2_subdev **sd = p;
  23. *sd = dev_get_drvdata(dev);
  24. /* non-zero value stops iteration */
  25. return 1;
  26. }
/*
 * Find the platform driver named @module_name and register the first subdev
 * provided by one of its bound devices with the mixer's v4l2_device.
 *
 * Returns the registered subdev, or NULL if the driver is absent, provides
 * no subdev, or subdev registration fails.  The driver reference taken by
 * driver_find() is always dropped before returning.
 */
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}
done:
	/* drop the reference acquired by driver_find() */
	put_driver(drv);
	return sd;
}
  56. int __devinit mxr_acquire_video(struct mxr_device *mdev,
  57. struct mxr_output_conf *output_conf, int output_count)
  58. {
  59. struct device *dev = mdev->dev;
  60. struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
  61. int i;
  62. int ret = 0;
  63. struct v4l2_subdev *sd;
  64. strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
  65. /* prepare context for V4L2 device */
  66. ret = v4l2_device_register(dev, v4l2_dev);
  67. if (ret) {
  68. mxr_err(mdev, "could not register v4l2 device.\n");
  69. goto fail;
  70. }
  71. mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
  72. if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
  73. mxr_err(mdev, "could not acquire vb2 allocator\n");
  74. goto fail_v4l2_dev;
  75. }
  76. /* registering outputs */
  77. mdev->output_cnt = 0;
  78. for (i = 0; i < output_count; ++i) {
  79. struct mxr_output_conf *conf = &output_conf[i];
  80. struct mxr_output *out;
  81. sd = find_and_register_subdev(mdev, conf->module_name);
  82. /* trying to register next output */
  83. if (sd == NULL)
  84. continue;
  85. out = kzalloc(sizeof *out, GFP_KERNEL);
  86. if (out == NULL) {
  87. mxr_err(mdev, "no memory for '%s'\n",
  88. conf->output_name);
  89. ret = -ENOMEM;
  90. /* registered subdevs are removed in fail_v4l2_dev */
  91. goto fail_output;
  92. }
  93. strlcpy(out->name, conf->output_name, sizeof(out->name));
  94. out->sd = sd;
  95. out->cookie = conf->cookie;
  96. mdev->output[mdev->output_cnt++] = out;
  97. mxr_info(mdev, "added output '%s' from module '%s'\n",
  98. conf->output_name, conf->module_name);
  99. /* checking if maximal number of outputs is reached */
  100. if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
  101. break;
  102. }
  103. if (mdev->output_cnt == 0) {
  104. mxr_err(mdev, "failed to register any output\n");
  105. ret = -ENODEV;
  106. /* skipping fail_output because there is nothing to free */
  107. goto fail_vb2_allocator;
  108. }
  109. return 0;
  110. fail_output:
  111. /* kfree is NULL-safe */
  112. for (i = 0; i < mdev->output_cnt; ++i)
  113. kfree(mdev->output[i]);
  114. memset(mdev->output, 0, sizeof mdev->output);
  115. fail_vb2_allocator:
  116. /* freeing allocator context */
  117. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  118. fail_v4l2_dev:
  119. /* NOTE: automatically unregister all subdevs */
  120. v4l2_device_unregister(v4l2_dev);
  121. fail:
  122. return ret;
  123. }
  124. void __devexit mxr_release_video(struct mxr_device *mdev)
  125. {
  126. int i;
  127. /* kfree is NULL-safe */
  128. for (i = 0; i < mdev->output_cnt; ++i)
  129. kfree(mdev->output[i]);
  130. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  131. v4l2_device_unregister(&mdev->v4l2_dev);
  132. }
  133. static int mxr_querycap(struct file *file, void *priv,
  134. struct v4l2_capability *cap)
  135. {
  136. struct mxr_layer *layer = video_drvdata(file);
  137. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  138. strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
  139. strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
  140. sprintf(cap->bus_info, "%d", layer->idx);
  141. cap->version = KERNEL_VERSION(0, 1, 0);
  142. cap->capabilities = V4L2_CAP_STREAMING |
  143. V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  144. return 0;
  145. }
/* Geometry handling */
/* Refresh the destination geometry from the output's current mbus format
 * and let the layer implementation enforce its hardware constraints. */
static void mxr_layer_geo_fix(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	/* TODO: add some dirty flag to avoid unnecessary adjustments */
	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer);
}
  158. static void mxr_layer_default_geo(struct mxr_layer *layer)
  159. {
  160. struct mxr_device *mdev = layer->mdev;
  161. struct v4l2_mbus_framefmt mbus_fmt;
  162. memset(&layer->geo, 0, sizeof layer->geo);
  163. mxr_get_mbus_fmt(mdev, &mbus_fmt);
  164. layer->geo.dst.full_width = mbus_fmt.width;
  165. layer->geo.dst.full_height = mbus_fmt.height;
  166. layer->geo.dst.width = layer->geo.dst.full_width;
  167. layer->geo.dst.height = layer->geo.dst.full_height;
  168. layer->geo.dst.field = mbus_fmt.field;
  169. layer->geo.src.full_width = mbus_fmt.width;
  170. layer->geo.src.full_height = mbus_fmt.height;
  171. layer->geo.src.width = layer->geo.src.full_width;
  172. layer->geo.src.height = layer->geo.src.full_height;
  173. layer->ops.fix_geometry(layer);
  174. }
/* Dump the complete source/destination geometry of @geo at debug level. */
static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}
/* forward declarations of the format-lookup helpers defined at the end
 * of this file */
static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);
  196. static int mxr_enum_fmt(struct file *file, void *priv,
  197. struct v4l2_fmtdesc *f)
  198. {
  199. struct mxr_layer *layer = video_drvdata(file);
  200. struct mxr_device *mdev = layer->mdev;
  201. const struct mxr_format *fmt;
  202. mxr_dbg(mdev, "%s\n", __func__);
  203. fmt = find_format_by_index(layer, f->index);
  204. if (fmt == NULL)
  205. return -EINVAL;
  206. strlcpy(f->description, fmt->name, sizeof(f->description));
  207. f->pixelformat = fmt->fourcc;
  208. return 0;
  209. }
/*
 * VIDIOC_S_FMT handler (multiplanar output): validate the fourcc against
 * the layer's format table and adopt the requested width/height as the
 * new source geometry, then re-fix the geometry for hardware limits.
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* requested size becomes both the full (buffer) and visible size */
	geo->src.full_width = pix->width;
	geo->src.width = pix->width;
	geo->src.full_height = pix->height;
	geo->src.height = pix->height;
	/* assure consistency of geometry */
	mxr_layer_geo_fix(layer);
	mxr_dbg(mdev, "width=%u height=%u span=%u\n",
		geo->src.width, geo->src.height, geo->src.full_width);
	return 0;
}
  237. static unsigned int divup(unsigned int divident, unsigned int divisor)
  238. {
  239. return (divident + divisor - 1) / divisor;
  240. }
  241. unsigned long mxr_get_plane_size(const struct mxr_block *blk,
  242. unsigned int width, unsigned int height)
  243. {
  244. unsigned int bl_width = divup(width, blk->width);
  245. unsigned int bl_height = divup(height, blk->height);
  246. return bl_width * bl_height * blk->size;
  247. }
/*
 * Fill the per-subframe plane descriptors for @fmt at the given size.
 * Several logical planes may map to the same memory subframe (see
 * fmt->plane2subframe): their sizes accumulate into one sizeimage and
 * the largest bytesperline wins.  Only fmt->num_subframes entries of
 * @planes are written; the caller must provide at least that many.
 */
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* zero only the subframes that will be produced */
	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		/* bytes per image line = block row bytes / lines per block */
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
/* VIDIOC_G_FMT handler (multiplanar output): report the current format
 * and the full source size along with per-plane sizes. */
static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
	return 0;
}
  279. static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
  280. enum v4l2_buf_type type)
  281. {
  282. switch (type) {
  283. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  284. case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
  285. return &geo->dst;
  286. case V4L2_BUF_TYPE_VIDEO_OVERLAY:
  287. return &geo->src;
  288. default:
  289. return NULL;
  290. }
  291. }
  292. static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
  293. {
  294. struct mxr_layer *layer = video_drvdata(file);
  295. struct mxr_crop *crop;
  296. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  297. crop = choose_crop_by_type(&layer->geo, a->type);
  298. if (crop == NULL)
  299. return -EINVAL;
  300. mxr_layer_geo_fix(layer);
  301. a->c.left = crop->x_offset;
  302. a->c.top = crop->y_offset;
  303. a->c.width = crop->width;
  304. a->c.height = crop->height;
  305. return 0;
  306. }
  307. static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
  308. {
  309. struct mxr_layer *layer = video_drvdata(file);
  310. struct mxr_crop *crop;
  311. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  312. crop = choose_crop_by_type(&layer->geo, a->type);
  313. if (crop == NULL)
  314. return -EINVAL;
  315. crop->x_offset = a->c.left;
  316. crop->y_offset = a->c.top;
  317. crop->width = a->c.width;
  318. crop->height = a->c.height;
  319. mxr_layer_geo_fix(layer);
  320. return 0;
  321. }
  322. static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
  323. {
  324. struct mxr_layer *layer = video_drvdata(file);
  325. struct mxr_crop *crop;
  326. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  327. crop = choose_crop_by_type(&layer->geo, a->type);
  328. if (crop == NULL)
  329. return -EINVAL;
  330. mxr_layer_geo_fix(layer);
  331. a->bounds.left = 0;
  332. a->bounds.top = 0;
  333. a->bounds.width = crop->full_width;
  334. a->bounds.top = crop->full_height;
  335. a->defrect = a->bounds;
  336. /* setting pixel aspect to 1/1 */
  337. a->pixelaspect.numerator = 1;
  338. a->pixelaspect.denominator = 1;
  339. return 0;
  340. }
  341. static int mxr_enum_dv_presets(struct file *file, void *fh,
  342. struct v4l2_dv_enum_preset *preset)
  343. {
  344. struct mxr_layer *layer = video_drvdata(file);
  345. struct mxr_device *mdev = layer->mdev;
  346. int ret;
  347. /* lock protects from changing sd_out */
  348. mutex_lock(&mdev->mutex);
  349. ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
  350. mutex_unlock(&mdev->mutex);
  351. return ret ? -EINVAL : 0;
  352. }
/*
 * VIDIOC_S_DV_PRESET handler: forwarded to the current output subdev.
 * Refused with -EBUSY while any stream depends on the output setup.
 */
static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);
	mutex_unlock(&mdev->mutex);
	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
  373. static int mxr_g_dv_preset(struct file *file, void *fh,
  374. struct v4l2_dv_preset *preset)
  375. {
  376. struct mxr_layer *layer = video_drvdata(file);
  377. struct mxr_device *mdev = layer->mdev;
  378. int ret;
  379. /* lock protects from changing sd_out */
  380. mutex_lock(&mdev->mutex);
  381. ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
  382. mutex_unlock(&mdev->mutex);
  383. return ret ? -EINVAL : 0;
  384. }
/*
 * VIDIOC_S_STD handler: forwarded to the current output subdev.
 * Refused with -EBUSY while any stream depends on the output setup.
 */
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);
	mutex_unlock(&mdev->mutex);
	return ret ? -EINVAL : 0;
}
  403. static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
  404. {
  405. struct mxr_layer *layer = video_drvdata(file);
  406. struct mxr_device *mdev = layer->mdev;
  407. int ret;
  408. /* lock protects from changing sd_out */
  409. mutex_lock(&mdev->mutex);
  410. ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
  411. mutex_unlock(&mdev->mutex);
  412. return ret ? -EINVAL : 0;
  413. }
/*
 * VIDIOC_ENUM_OUTPUT handler: describe the output at a->index, including
 * supported TV norms and whether the subdev implements preset/standard
 * setting (reflected in the capability flags).
 */
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	/* outputs below output_cnt are populated during acquire */
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
/*
 * VIDIOC_S_OUTPUT handler: switch the mixer to output @i and refresh the
 * video device's tvnorms from the new output subdev.  Refused with
 * -EBUSY while any stream depends on the current output.
 */
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;
	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		ret = -EBUSY;
		goto done;
	}
	mdev->current_output = i;
	/* norms depend on the selected output; re-query the subdev */
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

done:
	mutex_unlock(&mdev->mutex);
	return ret;
}
  458. static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
  459. {
  460. struct mxr_layer *layer = video_drvdata(file);
  461. struct mxr_device *mdev = layer->mdev;
  462. mutex_lock(&mdev->mutex);
  463. *p = mdev->current_output;
  464. mutex_unlock(&mdev->mutex);
  465. return 0;
  466. }
  467. static int mxr_reqbufs(struct file *file, void *priv,
  468. struct v4l2_requestbuffers *p)
  469. {
  470. struct mxr_layer *layer = video_drvdata(file);
  471. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  472. return vb2_reqbufs(&layer->vb_queue, p);
  473. }
  474. static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
  475. {
  476. struct mxr_layer *layer = video_drvdata(file);
  477. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  478. return vb2_querybuf(&layer->vb_queue, p);
  479. }
  480. static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  481. {
  482. struct mxr_layer *layer = video_drvdata(file);
  483. mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
  484. return vb2_qbuf(&layer->vb_queue, p);
  485. }
  486. static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  487. {
  488. struct mxr_layer *layer = video_drvdata(file);
  489. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  490. return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
  491. }
  492. static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
  493. {
  494. struct mxr_layer *layer = video_drvdata(file);
  495. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  496. return vb2_streamon(&layer->vb_queue, i);
  497. }
  498. static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
  499. {
  500. struct mxr_layer *layer = video_drvdata(file);
  501. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  502. return vb2_streamoff(&layer->vb_queue, i);
  503. }
/* V4L2 ioctl dispatch table shared by all mixer layer video nodes. */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* crop ioctls */
	.vidioc_g_crop = mxr_g_crop,
	.vidioc_s_crop = mxr_s_crop,
	.vidioc_cropcap = mxr_cropcap,
};
/*
 * File open handler.  The first opener (singular filehandle) powers the
 * device up, initializes the vb2 queue and installs a default format and
 * geometry; later opens only create an additional v4l2 filehandle.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}
	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;
	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}
	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}
  573. static unsigned int
  574. mxr_video_poll(struct file *file, struct poll_table_struct *wait)
  575. {
  576. struct mxr_layer *layer = video_drvdata(file);
  577. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  578. return vb2_poll(&layer->vb_queue, file, wait);
  579. }
  580. static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
  581. {
  582. struct mxr_layer *layer = video_drvdata(file);
  583. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  584. return vb2_mmap(&layer->vb_queue, vma);
  585. }
/*
 * File release handler: the last closer (singular filehandle) releases
 * the vb2 queue and drops the power reference taken in open.
 */
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}
/* File operations for the mixer layer video nodes. */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
  605. static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
  606. unsigned int *nplanes, unsigned int sizes[],
  607. void *alloc_ctxs[])
  608. {
  609. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  610. const struct mxr_format *fmt = layer->fmt;
  611. int i;
  612. struct mxr_device *mdev = layer->mdev;
  613. struct v4l2_plane_pix_format planes[3];
  614. mxr_dbg(mdev, "%s\n", __func__);
  615. /* checking if format was configured */
  616. if (fmt == NULL)
  617. return -EINVAL;
  618. mxr_dbg(mdev, "fmt = %s\n", fmt->name);
  619. mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
  620. layer->geo.src.full_height);
  621. *nplanes = fmt->num_subframes;
  622. for (i = 0; i < fmt->num_subframes; ++i) {
  623. alloc_ctxs[i] = layer->mdev->alloc_ctx;
  624. sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
  625. mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
  626. }
  627. if (*nbuffers == 0)
  628. *nbuffers = 1;
  629. return 0;
  630. }
/*
 * vb2 buf_queue callback: append the buffer to the layer's enqueue list.
 * If this is the first buffer after STREAMON (state STREAMING_START),
 * enable the layer in hardware and register as a streamer — done outside
 * the spinlock because stream_set/mxr_streamer_get may sleep.
 */
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	int must_start = 0;

	spin_lock_irqsave(&layer->enq_slock, flags);
	if (layer->state == MXR_LAYER_STREAMING_START) {
		layer->state = MXR_LAYER_STREAMING;
		must_start = 1;
	}
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	if (must_start) {
		layer->ops.stream_set(layer, MXR_ENABLE);
		mxr_streamer_get(mdev);
	}
	mxr_dbg(mdev, "queuing buffer\n");
}
  651. static void wait_lock(struct vb2_queue *vq)
  652. {
  653. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  654. mxr_dbg(layer->mdev, "%s\n", __func__);
  655. mutex_lock(&layer->mutex);
  656. }
  657. static void wait_unlock(struct vb2_queue *vq)
  658. {
  659. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  660. mxr_dbg(layer->mdev, "%s\n", __func__);
  661. mutex_unlock(&layer->mutex);
  662. }
/*
 * vb2 start_streaming callback: lock the output configuration, push the
 * current geometry and format to hardware, and arm the layer so the next
 * queued buffer actually enables streaming (see buf_queue).
 */
static int start_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	/* block any changes in output configuration */
	mxr_output_get(mdev);

	/* update layers geometry */
	mxr_layer_geo_fix(layer);
	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING_START;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	return 0;
}
/*
 * Watchdog fired from stop_streaming when in-flight buffers did not
 * complete in time: force-complete the update/shadow buffers with an
 * error status so vb2_wait_for_all_buffers() can make progress.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* when both pointers alias, report the buffer only once */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
/*
 * vb2 stop_streaming callback: flush all enqueued buffers with an error
 * status, wait (bounded by a 1 s watchdog) for the hardware to release
 * the in-flight buffers, then disable the layer and drop the streamer
 * and output references taken at start.
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffers to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give the hardware 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}
/* vb2 queue operations for the mixer layers. */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	/* wait_prepare releases the mutex, wait_finish re-takes it */
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
/* FIXME: try to move these functions into mxr_base_layer_create */

/* Register the layer's video_device node; logs the assigned /dev/videoN
 * on success.  Returns 0 or a negative errno. */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}
/* Unregister the layer's video_device node (mirror of the register call). */
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}
/* Invoke the layer implementation's release hook, if it provides one. */
void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}
/* Free the layer object allocated by mxr_base_layer_create. */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
/* video_device release callback: the layer owns the embedded vfd, so
 * nothing is freed here — only a trace is printed. */
static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}
/*
 * Allocate and initialize a mixer layer: locks, queue/vfd templates and
 * the implementation ops.  The video node is NOT registered here (see
 * mxr_base_layer_register).  Returns the layer or NULL on OOM.
 */
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	/* template for the video_device embedded in the layer */
	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	/* template for the vb2 queue embedded in the layer */
	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}
  815. static const struct mxr_format *find_format_by_fourcc(
  816. struct mxr_layer *layer, unsigned long fourcc)
  817. {
  818. int i;
  819. for (i = 0; i < layer->fmt_array_size; ++i)
  820. if (layer->fmt_array[i]->fourcc == fourcc)
  821. return layer->fmt_array[i];
  822. return NULL;
  823. }
  824. static const struct mxr_format *find_format_by_index(
  825. struct mxr_layer *layer, unsigned long index)
  826. {
  827. if (index >= layer->fmt_array_size)
  828. return NULL;
  829. return layer->fmt_array[index];
  830. }