mixer_video.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125
  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
  10. * by the Free Software Foundation. either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #include "mixer.h"
  14. #include <media/v4l2-ioctl.h>
  15. #include <linux/videodev2.h>
  16. #include <linux/mm.h>
  17. #include <linux/module.h>
  18. #include <linux/version.h>
  19. #include <linux/timer.h>
  20. #include <media/videobuf2-dma-contig.h>
  21. static int find_reg_callback(struct device *dev, void *p)
  22. {
  23. struct v4l2_subdev **sd = p;
  24. *sd = dev_get_drvdata(dev);
  25. /* non-zero value stops iteration */
  26. return 1;
  27. }
/*
 * Look up the platform driver named @module_name and register the subdev
 * provided by its first bound device with the mixer's v4l2_device.
 *
 * Returns the registered subdev on success, or NULL when the driver is
 * absent, exposes no subdev, or registration fails.
 */
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		/* NULL tells the caller the output is unusable */
		sd = NULL;
	}
done:
	return sd;
}
  56. int __devinit mxr_acquire_video(struct mxr_device *mdev,
  57. struct mxr_output_conf *output_conf, int output_count)
  58. {
  59. struct device *dev = mdev->dev;
  60. struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
  61. int i;
  62. int ret = 0;
  63. struct v4l2_subdev *sd;
  64. strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
  65. /* prepare context for V4L2 device */
  66. ret = v4l2_device_register(dev, v4l2_dev);
  67. if (ret) {
  68. mxr_err(mdev, "could not register v4l2 device.\n");
  69. goto fail;
  70. }
  71. mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
  72. if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
  73. mxr_err(mdev, "could not acquire vb2 allocator\n");
  74. goto fail_v4l2_dev;
  75. }
  76. /* registering outputs */
  77. mdev->output_cnt = 0;
  78. for (i = 0; i < output_count; ++i) {
  79. struct mxr_output_conf *conf = &output_conf[i];
  80. struct mxr_output *out;
  81. sd = find_and_register_subdev(mdev, conf->module_name);
  82. /* trying to register next output */
  83. if (sd == NULL)
  84. continue;
  85. out = kzalloc(sizeof *out, GFP_KERNEL);
  86. if (out == NULL) {
  87. mxr_err(mdev, "no memory for '%s'\n",
  88. conf->output_name);
  89. ret = -ENOMEM;
  90. /* registered subdevs are removed in fail_v4l2_dev */
  91. goto fail_output;
  92. }
  93. strlcpy(out->name, conf->output_name, sizeof(out->name));
  94. out->sd = sd;
  95. out->cookie = conf->cookie;
  96. mdev->output[mdev->output_cnt++] = out;
  97. mxr_info(mdev, "added output '%s' from module '%s'\n",
  98. conf->output_name, conf->module_name);
  99. /* checking if maximal number of outputs is reached */
  100. if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
  101. break;
  102. }
  103. if (mdev->output_cnt == 0) {
  104. mxr_err(mdev, "failed to register any output\n");
  105. ret = -ENODEV;
  106. /* skipping fail_output because there is nothing to free */
  107. goto fail_vb2_allocator;
  108. }
  109. return 0;
  110. fail_output:
  111. /* kfree is NULL-safe */
  112. for (i = 0; i < mdev->output_cnt; ++i)
  113. kfree(mdev->output[i]);
  114. memset(mdev->output, 0, sizeof mdev->output);
  115. fail_vb2_allocator:
  116. /* freeing allocator context */
  117. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  118. fail_v4l2_dev:
  119. /* NOTE: automatically unregister all subdevs */
  120. v4l2_device_unregister(v4l2_dev);
  121. fail:
  122. return ret;
  123. }
  124. void mxr_release_video(struct mxr_device *mdev)
  125. {
  126. int i;
  127. /* kfree is NULL-safe */
  128. for (i = 0; i < mdev->output_cnt; ++i)
  129. kfree(mdev->output[i]);
  130. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  131. v4l2_device_unregister(&mdev->v4l2_dev);
  132. }
  133. static int mxr_querycap(struct file *file, void *priv,
  134. struct v4l2_capability *cap)
  135. {
  136. struct mxr_layer *layer = video_drvdata(file);
  137. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  138. strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
  139. strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
  140. sprintf(cap->bus_info, "%d", layer->idx);
  141. cap->version = KERNEL_VERSION(0, 1, 0);
  142. cap->capabilities = V4L2_CAP_STREAMING |
  143. V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  144. return 0;
  145. }
/* Dump the full source/destination geometry of a layer at debug level. */
static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}
/*
 * Initialize the layer geometry from the current output (mbus) format:
 * source and destination both start as full-screen, then the layer's
 * fix_geometry callback adjusts the sink side to hardware constraints.
 */
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	/* destination covers the whole display */
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	/* source defaults to the same size as the display */
	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}
  182. static void mxr_layer_update_output(struct mxr_layer *layer)
  183. {
  184. struct mxr_device *mdev = layer->mdev;
  185. struct v4l2_mbus_framefmt mbus_fmt;
  186. mxr_get_mbus_fmt(mdev, &mbus_fmt);
  187. /* checking if update is needed */
  188. if (layer->geo.dst.full_width == mbus_fmt.width &&
  189. layer->geo.dst.full_height == mbus_fmt.width)
  190. return;
  191. layer->geo.dst.full_width = mbus_fmt.width;
  192. layer->geo.dst.full_height = mbus_fmt.height;
  193. layer->geo.dst.field = mbus_fmt.field;
  194. layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
  195. mxr_geometry_dump(mdev, &layer->geo);
  196. }
  197. static const struct mxr_format *find_format_by_fourcc(
  198. struct mxr_layer *layer, unsigned long fourcc);
  199. static const struct mxr_format *find_format_by_index(
  200. struct mxr_layer *layer, unsigned long index);
  201. static int mxr_enum_fmt(struct file *file, void *priv,
  202. struct v4l2_fmtdesc *f)
  203. {
  204. struct mxr_layer *layer = video_drvdata(file);
  205. struct mxr_device *mdev = layer->mdev;
  206. const struct mxr_format *fmt;
  207. mxr_dbg(mdev, "%s\n", __func__);
  208. fmt = find_format_by_index(layer, f->index);
  209. if (fmt == NULL)
  210. return -EINVAL;
  211. strlcpy(f->description, fmt->name, sizeof(f->description));
  212. f->pixelformat = fmt->fourcc;
  213. return 0;
  214. }
  215. static unsigned int divup(unsigned int divident, unsigned int divisor)
  216. {
  217. return (divident + divisor - 1) / divisor;
  218. }
  219. unsigned long mxr_get_plane_size(const struct mxr_block *blk,
  220. unsigned int width, unsigned int height)
  221. {
  222. unsigned int bl_width = divup(width, blk->width);
  223. unsigned int bl_height = divup(height, blk->height);
  224. return bl_width * bl_height * blk->size;
  225. }
/*
 * Fill a v4l2_plane_pix_format array from the mixer format description.
 * Several color planes may map to the same memory subframe (via
 * plane2subframe), so sizeimage is accumulated per subframe and
 * bytesperline takes the maximum over the contributing planes.
 */
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		/* NOTE(review): dividing by blk->height assumes blk->size
		 * spans blk->height lines of a block — confirm against the
		 * format tables */
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
  246. static int mxr_g_fmt(struct file *file, void *priv,
  247. struct v4l2_format *f)
  248. {
  249. struct mxr_layer *layer = video_drvdata(file);
  250. struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
  251. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  252. pix->width = layer->geo.src.full_width;
  253. pix->height = layer->geo.src.full_height;
  254. pix->field = V4L2_FIELD_NONE;
  255. pix->pixelformat = layer->fmt->fourcc;
  256. pix->colorspace = layer->fmt->colorspace;
  257. mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
  258. return 0;
  259. }
/*
 * Implement VIDIOC_S_FMT: select the pixel format and negotiate a source
 * geometry the hardware accepts. The negotiation is a fixed three-pass
 * sequence (maximize full size, apply crop, minimize full size) driven by
 * the layer's fix_geometry callback — the order matters.
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;

	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}
/*
 * Implement VIDIOC_G_SELECTION: report crop (source buffer) or compose
 * (display destination) rectangles of the layer geometry.
 */
static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		/* active source rectangle */
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* whole source buffer */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		/* active destination rectangle */
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		/* whole display */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
  341. /* returns 1 if rectangle 'a' is inside 'b' */
  342. static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
  343. {
  344. if (a->left < b->left)
  345. return 0;
  346. if (a->top < b->top)
  347. return 0;
  348. if (a->left + a->width > b->left + b->width)
  349. return 0;
  350. if (a->top + a->height > b->top + b->height)
  351. return 0;
  352. return 1;
  353. }
/*
 * Implement VIDIOC_S_SELECTION: apply a crop/compose rectangle through the
 * layer's fix_geometry callback, then verify the hardware-adjusted result
 * against the V4L2_SEL_FLAG_LE/GE constraints. On constraint violation the
 * previous geometry is restored and -ERANGE is returned. Read-only targets
 * (DEFAULT/BOUNDS) are not modified; only the bounds are reported back.
 */
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;	/* backup, valid only when target set */
	struct v4l2_rect res;

	memset(&res, 0, sizeof res);

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;
	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;
	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}

	/* apply change and update geometry if needed */
	if (target) {
		/* backup current geometry if setup fails */
		memcpy(&tmp, geo, sizeof tmp);

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve update selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof tmp);
	return -ERANGE;
}
  425. static int mxr_enum_dv_presets(struct file *file, void *fh,
  426. struct v4l2_dv_enum_preset *preset)
  427. {
  428. struct mxr_layer *layer = video_drvdata(file);
  429. struct mxr_device *mdev = layer->mdev;
  430. int ret;
  431. /* lock protects from changing sd_out */
  432. mutex_lock(&mdev->mutex);
  433. ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
  434. mutex_unlock(&mdev->mutex);
  435. return ret ? -EINVAL : 0;
  436. }
/*
 * Implement VIDIOC_S_DV_PRESET: forward the preset to the output subdev
 * and refresh the layer geometry. Refused with -EBUSY while any entity
 * depends on the current output configuration (n_output > 0).
 */
static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependant on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	/* geometry update is done outside the lock */
	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
  458. static int mxr_g_dv_preset(struct file *file, void *fh,
  459. struct v4l2_dv_preset *preset)
  460. {
  461. struct mxr_layer *layer = video_drvdata(file);
  462. struct mxr_device *mdev = layer->mdev;
  463. int ret;
  464. /* lock protects from changing sd_out */
  465. mutex_lock(&mdev->mutex);
  466. ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
  467. mutex_unlock(&mdev->mutex);
  468. return ret ? -EINVAL : 0;
  469. }
/*
 * Implement VIDIOC_S_STD: forward the analog TV standard to the output
 * subdev and refresh the layer geometry. Refused with -EBUSY while any
 * entity depends on the current output configuration (n_output > 0).
 */
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependant on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	/* geometry update is done outside the lock */
	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}
  489. static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
  490. {
  491. struct mxr_layer *layer = video_drvdata(file);
  492. struct mxr_device *mdev = layer->mdev;
  493. int ret;
  494. /* lock protects from changing sd_out */
  495. mutex_lock(&mdev->mutex);
  496. ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
  497. mutex_unlock(&mdev->mutex);
  498. return ret ? -EINVAL : 0;
  499. }
/*
 * Implement VIDIOC_ENUM_OUTPUT: describe output a->index and derive the
 * PRESETS/STD capability flags from which ops the subdev implements.
 */
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	/* entries below output_cnt are always populated in acquire */
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
/*
 * Implement VIDIOC_S_OUTPUT: switch the current output and refresh the
 * video node's tvnorms from the new subdev. Refused with -EBUSY while any
 * entity depends on the current output configuration (n_output > 0).
 */
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update layers geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}
/* Implement VIDIOC_G_OUTPUT: report the index of the current output. */
static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	/* current_output is protected by the device mutex */
	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}
/* Forward VIDIOC_REQBUFS to the layer's videobuf2 queue. */
static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}
/* Forward VIDIOC_QUERYBUF to the layer's videobuf2 queue. */
static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}
/* Forward VIDIOC_QBUF to the layer's videobuf2 queue. */
static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}
/* Forward VIDIOC_DQBUF to the vb2 queue, honoring O_NONBLOCK. */
static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
/* Forward VIDIOC_STREAMON to the layer's videobuf2 queue. */
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}
/* Forward VIDIOC_STREAMOFF to the layer's videobuf2 queue. */
static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}
/* V4L2 ioctl dispatch table shared by all mixer layer video nodes. */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};
/*
 * Open handler for a layer video node. The first (singular) opener powers
 * the device up, initializes the vb2 queue and applies the default format
 * and geometry; subsequent openers only get a file handle. Failures are
 * unwound through the goto chain in reverse acquisition order.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}
	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;
	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}
	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);
	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);
	return ret;
}
/* Poll handler: delegate to vb2 under the layer mutex. */
static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);
	unsigned int res;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	/* uninterruptible lock: poll must return a mask, not -ERESTARTSYS */
	mutex_lock(&layer->mutex);
	res = vb2_poll(&layer->vb_queue, file, wait);
	mutex_unlock(&layer->mutex);
	return res;
}
/* Mmap handler: delegate buffer mapping to vb2 under the layer mutex. */
static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);
	int ret;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	ret = vb2_mmap(&layer->vb_queue, vma);
	mutex_unlock(&layer->mutex);
	return ret;
}
/*
 * Release handler: the last (singular) file handle tears down the vb2
 * queue and drops the power reference taken in mxr_video_open().
 */
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	mutex_lock(&layer->mutex);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	mutex_unlock(&layer->mutex);
	return 0;
}
/* File operations for the mixer layer video nodes. */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
/*
 * vb2 queue_setup callback: report the plane count, per-plane buffer sizes
 * and allocator contexts for the currently configured format.
 */
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	/* NOTE(review): assumes fmt->num_subframes <= 3 — confirm against
	 * the driver's format tables */
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	/* streaming needs at least one buffer */
	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}
/*
 * vb2 buf_queue callback: append the buffer to the layer's enqueue list
 * under the irq-safe spinlock, to be picked up for display later.
 */
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}
/*
 * Take the layer mutex on behalf of videobuf2 — presumably installed as
 * the queue's wait_finish callback; confirm against the vb2_ops table.
 */
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}
/*
 * Drop the layer mutex on behalf of videobuf2 — presumably installed as
 * the queue's wait_prepare callback; confirm against the vb2_ops table.
 */
static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}
/*
 * vb2 start_streaming callback: pin the output configuration, program the
 * layer format, mark the layer streaming (under the irq-safe lock) and
 * enable it in hardware. Requires at least one queued buffer.
 */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}
/*
 * Timer callback armed by stop_streaming(): if the hardware failed to return
 * the in-flight buffers within the timeout, force them to the ERROR state so
 * vb2_wait_for_all_buffers() can complete.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
	spin_lock_irqsave(&layer->enq_slock, flags);
	/* if both slots hold the same buffer, complete it only once */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
/*
 * vb2 .stop_streaming callback: flush all queued buffers, wait (with a
 * watchdog timeout) for the in-flight ones to complete, then disable the
 * layer in hardware and release the output/streamer references taken in
 * start_streaming().
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);
	spin_lock_irqsave(&layer->enq_slock, flags);
	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;
	/* set all buffers to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	/* give 1 second for the last buffers to complete */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
	/* wait until all buffers reach the done state; the watchdog forces
	 * them to ERROR if the hardware never returns them */
	vb2_wait_for_all_buffers(vq);
	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);
	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}
/* videobuf2 queue operations for all mixer layers */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	/* the crossed mapping is intentional: vb2 calls wait_prepare before
	 * sleeping (so we unlock) and wait_finish afterwards (so we lock) */
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
  842. /* FIXME: try to put this functions to mxr_base_layer_create */
  843. int mxr_base_layer_register(struct mxr_layer *layer)
  844. {
  845. struct mxr_device *mdev = layer->mdev;
  846. int ret;
  847. ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
  848. if (ret)
  849. mxr_err(mdev, "failed to register video device\n");
  850. else
  851. mxr_info(mdev, "registered layer %s as /dev/video%d\n",
  852. layer->vfd.name, layer->vfd.num);
  853. return ret;
  854. }
/* Unregister the layer's video device node from the V4L2 core. */
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}
/* Invoke the layer-type specific release hook, if one was provided. */
void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}
/* Free the layer allocated by mxr_base_layer_create(). */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
/*
 * .release for the embedded video_device: intentionally does not free
 * anything -- the vfd is embedded in struct mxr_layer, whose lifetime is
 * managed by mxr_base_layer_release().
 */
static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}
  872. struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
  873. int idx, char *name, struct mxr_layer_ops *ops)
  874. {
  875. struct mxr_layer *layer;
  876. layer = kzalloc(sizeof *layer, GFP_KERNEL);
  877. if (layer == NULL) {
  878. mxr_err(mdev, "not enough memory for layer.\n");
  879. goto fail;
  880. }
  881. layer->mdev = mdev;
  882. layer->idx = idx;
  883. layer->ops = *ops;
  884. spin_lock_init(&layer->enq_slock);
  885. INIT_LIST_HEAD(&layer->enq_list);
  886. mutex_init(&layer->mutex);
  887. layer->vfd = (struct video_device) {
  888. .minor = -1,
  889. .release = mxr_vfd_release,
  890. .fops = &mxr_fops,
  891. .ioctl_ops = &mxr_ioctl_ops,
  892. };
  893. strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
  894. /* let framework control PRIORITY */
  895. set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
  896. video_set_drvdata(&layer->vfd, layer);
  897. layer->vfd.lock = &layer->mutex;
  898. layer->vfd.v4l2_dev = &mdev->v4l2_dev;
  899. layer->vb_queue = (struct vb2_queue) {
  900. .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
  901. .io_modes = VB2_MMAP | VB2_USERPTR,
  902. .drv_priv = layer,
  903. .buf_struct_size = sizeof(struct mxr_buffer),
  904. .ops = &mxr_video_qops,
  905. .mem_ops = &vb2_dma_contig_memops,
  906. };
  907. return layer;
  908. fail:
  909. return NULL;
  910. }
  911. static const struct mxr_format *find_format_by_fourcc(
  912. struct mxr_layer *layer, unsigned long fourcc)
  913. {
  914. int i;
  915. for (i = 0; i < layer->fmt_array_size; ++i)
  916. if (layer->fmt_array[i]->fourcc == fourcc)
  917. return layer->fmt_array[i];
  918. return NULL;
  919. }
  920. static const struct mxr_format *find_format_by_index(
  921. struct mxr_layer *layer, unsigned long index)
  922. {
  923. if (index >= layer->fmt_array_size)
  924. return NULL;
  925. return layer->fmt_array[index];
  926. }