mixer_video.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154
  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
  10. * by the Free Software Foundation, either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #define pr_fmt(fmt) "s5p-tv (mixer): " fmt
  14. #include "mixer.h"
  15. #include <media/v4l2-ioctl.h>
  16. #include <linux/videodev2.h>
  17. #include <linux/mm.h>
  18. #include <linux/module.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/timer.h>
  21. #include <media/videobuf2-dma-contig.h>
  22. static int find_reg_callback(struct device *dev, void *p)
  23. {
  24. struct v4l2_subdev **sd = p;
  25. *sd = dev_get_drvdata(dev);
  26. /* non-zero value stops iteration */
  27. return 1;
  28. }
/*
 * Locate a platform driver by @module_name, take the first device bound to
 * it, and register that device's subdev with the mixer's v4l2_device.
 * Returns the registered subdev, or NULL if the driver is absent, provides
 * no subdev, or registration fails.
 */
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		/* report failure and signal it to the caller via NULL */
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	return sd;
}
/*
 * Acquire all video-side resources for the mixer device: register the
 * v4l2_device, create the vb2 DMA-contig allocator context, and register
 * every output described in @output_conf (up to MXR_MAX_OUTPUTS).
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto-cleanup chain below.
 */
int mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		ret = PTR_ERR(mdev->alloc_ctx);
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof(*out), GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	/* at least one output must have been registered successfully */
	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof(mdev->output));

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregister all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}
  126. void mxr_release_video(struct mxr_device *mdev)
  127. {
  128. int i;
  129. /* kfree is NULL-safe */
  130. for (i = 0; i < mdev->output_cnt; ++i)
  131. kfree(mdev->output[i]);
  132. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  133. v4l2_device_unregister(&mdev->v4l2_dev);
  134. }
  135. static int mxr_querycap(struct file *file, void *priv,
  136. struct v4l2_capability *cap)
  137. {
  138. struct mxr_layer *layer = video_drvdata(file);
  139. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  140. strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
  141. strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
  142. sprintf(cap->bus_info, "%d", layer->idx);
  143. cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  144. cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
  145. return 0;
  146. }
  147. static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
  148. {
  149. mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
  150. geo->src.full_width, geo->src.full_height);
  151. mxr_dbg(mdev, "src.size = (%u, %u)\n",
  152. geo->src.width, geo->src.height);
  153. mxr_dbg(mdev, "src.offset = (%u, %u)\n",
  154. geo->src.x_offset, geo->src.y_offset);
  155. mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
  156. geo->dst.full_width, geo->dst.full_height);
  157. mxr_dbg(mdev, "dst.size = (%u, %u)\n",
  158. geo->dst.width, geo->dst.height);
  159. mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
  160. geo->dst.x_offset, geo->dst.y_offset);
  161. mxr_dbg(mdev, "ratio = (%u, %u)\n",
  162. geo->x_ratio, geo->y_ratio);
  163. }
/*
 * Reset a layer's geometry to the default: source and destination both
 * matching the current output (mbus) resolution, no cropping or offset.
 * The layer's fix_geometry op is then given a chance to adjust the sink
 * side to hardware constraints.
 */
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof(layer->geo));

	/* query current output resolution from the mixer */
	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	/* destination: full screen, no offset */
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	/* source: same size as the screen, uncropped */
	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	/* let the layer implementation clamp the sink geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}
  183. static void mxr_layer_update_output(struct mxr_layer *layer)
  184. {
  185. struct mxr_device *mdev = layer->mdev;
  186. struct v4l2_mbus_framefmt mbus_fmt;
  187. mxr_get_mbus_fmt(mdev, &mbus_fmt);
  188. /* checking if update is needed */
  189. if (layer->geo.dst.full_width == mbus_fmt.width &&
  190. layer->geo.dst.full_height == mbus_fmt.width)
  191. return;
  192. layer->geo.dst.full_width = mbus_fmt.width;
  193. layer->geo.dst.full_height = mbus_fmt.height;
  194. layer->geo.dst.field = mbus_fmt.field;
  195. layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
  196. mxr_geometry_dump(mdev, &layer->geo);
  197. }
  198. static const struct mxr_format *find_format_by_fourcc(
  199. struct mxr_layer *layer, unsigned long fourcc);
  200. static const struct mxr_format *find_format_by_index(
  201. struct mxr_layer *layer, unsigned long index);
  202. static int mxr_enum_fmt(struct file *file, void *priv,
  203. struct v4l2_fmtdesc *f)
  204. {
  205. struct mxr_layer *layer = video_drvdata(file);
  206. struct mxr_device *mdev = layer->mdev;
  207. const struct mxr_format *fmt;
  208. mxr_dbg(mdev, "%s\n", __func__);
  209. fmt = find_format_by_index(layer, f->index);
  210. if (fmt == NULL)
  211. return -EINVAL;
  212. strlcpy(f->description, fmt->name, sizeof(f->description));
  213. f->pixelformat = fmt->fourcc;
  214. return 0;
  215. }
  216. static unsigned int divup(unsigned int divident, unsigned int divisor)
  217. {
  218. return (divident + divisor - 1) / divisor;
  219. }
  220. unsigned long mxr_get_plane_size(const struct mxr_block *blk,
  221. unsigned int width, unsigned int height)
  222. {
  223. unsigned int bl_width = divup(width, blk->width);
  224. unsigned int bl_height = divup(height, blk->height);
  225. return bl_width * bl_height * blk->size;
  226. }
/*
 * Fill the v4l2 per-plane format array for @fmt at the given resolution.
 * Several driver planes may map onto one v4l2 subframe (plane2subframe),
 * so sizeimage is accumulated and bytesperline takes the maximum.
 */
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		/* route this driver plane to its target v4l2 subframe */
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		/* round image up to whole macroblocks */
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		/* bytes per line of macroblock rows */
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
  247. static int mxr_g_fmt(struct file *file, void *priv,
  248. struct v4l2_format *f)
  249. {
  250. struct mxr_layer *layer = video_drvdata(file);
  251. struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
  252. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  253. pix->width = layer->geo.src.full_width;
  254. pix->height = layer->geo.src.full_height;
  255. pix->field = V4L2_FIELD_NONE;
  256. pix->pixelformat = layer->fmt->fourcc;
  257. pix->colorspace = layer->fmt->colorspace;
  258. mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
  259. return 0;
  260. }
/*
 * VIDIOC_S_FMT handler. Negotiates the source geometry in three passes
 * through fix_geometry (grow full size, set crop, shrink full size) so the
 * layer implementation can clamp each stage, then reports the result back
 * through mxr_g_fmt().
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;

	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);

	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}
/*
 * VIDIOC_G_SELECTION handler. Crop targets read from the source geometry,
 * compose targets from the destination geometry; DEFAULT/BOUNDS targets
 * report the full rectangle with zero offset.
 */
static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	/* this is an output device; only output buffer types are valid */
	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		/* active crop rectangle inside the source buffer */
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* whole source buffer */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		/* active compose rectangle on the screen */
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		/* whole screen */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
  342. /* returns 1 if rectangle 'a' is inside 'b' */
  343. static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
  344. {
  345. if (a->left < b->left)
  346. return 0;
  347. if (a->top < b->top)
  348. return 0;
  349. if (a->left + a->width > b->left + b->width)
  350. return 0;
  351. if (a->top + a->height > b->top + b->height)
  352. return 0;
  353. return 1;
  354. }
/*
 * VIDIOC_S_SELECTION handler. Applies the requested crop/compose rectangle
 * through the layer's fix_geometry op, then validates the hardware-adjusted
 * result against the V4L2_SEL_FLAG_LE/GE constraints. On constraint failure
 * the previous geometry is restored and -ERANGE returned. Read-only targets
 * (DEFAULT/BOUNDS) are accepted but only echo the full rectangle.
 */
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;	/* backup for rollback on failure */
	struct v4l2_rect res;

	memset(&res, 0, sizeof(res));

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	/* output device: only output buffer types are valid */
	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;
	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;
	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}

	/* apply change and update geometry if needed */
	if (target) {
		/* backup current geometry if setup fails */
		memcpy(&tmp, geo, sizeof(tmp));

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve update selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof(tmp));
	return -ERANGE;
}
  426. static int mxr_enum_dv_timings(struct file *file, void *fh,
  427. struct v4l2_enum_dv_timings *timings)
  428. {
  429. struct mxr_layer *layer = video_drvdata(file);
  430. struct mxr_device *mdev = layer->mdev;
  431. int ret;
  432. /* lock protects from changing sd_out */
  433. mutex_lock(&mdev->mutex);
  434. ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_timings, timings);
  435. mutex_unlock(&mdev->mutex);
  436. return ret ? -EINVAL : 0;
  437. }
/*
 * VIDIOC_S_DV_TIMINGS handler: forward new timings to the output subdev.
 * Refused with -EBUSY while any entity depends on the output configuration.
 */
static int mxr_s_dv_timings(struct file *file, void *fh,
	struct v4l2_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* timings change cannot be done while there is an entity
	 * dependant on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);

	mutex_unlock(&mdev->mutex);

	/* NOTE(review): geometry is refreshed even when the subdev call
	 * failed — presumably harmless, but worth confirming */
	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
  459. static int mxr_g_dv_timings(struct file *file, void *fh,
  460. struct v4l2_dv_timings *timings)
  461. {
  462. struct mxr_layer *layer = video_drvdata(file);
  463. struct mxr_device *mdev = layer->mdev;
  464. int ret;
  465. /* lock protects from changing sd_out */
  466. mutex_lock(&mdev->mutex);
  467. ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
  468. mutex_unlock(&mdev->mutex);
  469. return ret ? -EINVAL : 0;
  470. }
  471. static int mxr_dv_timings_cap(struct file *file, void *fh,
  472. struct v4l2_dv_timings_cap *cap)
  473. {
  474. struct mxr_layer *layer = video_drvdata(file);
  475. struct mxr_device *mdev = layer->mdev;
  476. int ret;
  477. /* lock protects from changing sd_out */
  478. mutex_lock(&mdev->mutex);
  479. ret = v4l2_subdev_call(to_outsd(mdev), video, dv_timings_cap, cap);
  480. mutex_unlock(&mdev->mutex);
  481. return ret ? -EINVAL : 0;
  482. }
/*
 * VIDIOC_S_STD handler: set the analog TV output standard on the output
 * subdev. Refused with -EBUSY while any entity depends on the output
 * configuration.
 */
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependant on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);

	mutex_unlock(&mdev->mutex);

	/* resolution may have changed with the standard */
	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}
  502. static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
  503. {
  504. struct mxr_layer *layer = video_drvdata(file);
  505. struct mxr_device *mdev = layer->mdev;
  506. int ret;
  507. /* lock protects from changing sd_out */
  508. mutex_lock(&mdev->mutex);
  509. ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
  510. mutex_unlock(&mdev->mutex);
  511. return ret ? -EINVAL : 0;
  512. }
/*
 * VIDIOC_ENUM_OUTPUT handler: describe output a->index by name, supported
 * TV norms and capabilities derived from the subdev's available ops.
 */
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	/* outputs below output_cnt are always populated */
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	/* advertise only what the subdev actually implements */
	if (sd->ops->video && sd->ops->video->s_dv_timings)
		a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
/*
 * VIDIOC_S_OUTPUT handler: switch the active output. Refused with -EBUSY
 * while any entity depends on the current output configuration. On success
 * the video device's tvnorms and the layer geometry are refreshed.
 */
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;

	/* rebuild the norm mask for the newly selected subdev */
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update layers geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}
  557. static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
  558. {
  559. struct mxr_layer *layer = video_drvdata(file);
  560. struct mxr_device *mdev = layer->mdev;
  561. mutex_lock(&mdev->mutex);
  562. *p = mdev->current_output;
  563. mutex_unlock(&mdev->mutex);
  564. return 0;
  565. }
  566. static int mxr_reqbufs(struct file *file, void *priv,
  567. struct v4l2_requestbuffers *p)
  568. {
  569. struct mxr_layer *layer = video_drvdata(file);
  570. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  571. return vb2_reqbufs(&layer->vb_queue, p);
  572. }
  573. static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
  574. {
  575. struct mxr_layer *layer = video_drvdata(file);
  576. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  577. return vb2_querybuf(&layer->vb_queue, p);
  578. }
  579. static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  580. {
  581. struct mxr_layer *layer = video_drvdata(file);
  582. mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
  583. return vb2_qbuf(&layer->vb_queue, p);
  584. }
  585. static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
  586. {
  587. struct mxr_layer *layer = video_drvdata(file);
  588. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  589. return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
  590. }
  591. static int mxr_expbuf(struct file *file, void *priv,
  592. struct v4l2_exportbuffer *eb)
  593. {
  594. struct mxr_layer *layer = video_drvdata(file);
  595. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  596. return vb2_expbuf(&layer->vb_queue, eb);
  597. }
  598. static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
  599. {
  600. struct mxr_layer *layer = video_drvdata(file);
  601. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  602. return vb2_streamon(&layer->vb_queue, i);
  603. }
  604. static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
  605. {
  606. struct mxr_layer *layer = video_drvdata(file);
  607. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  608. return vb2_streamoff(&layer->vb_queue, i);
  609. }
/* V4L2 ioctl dispatch table shared by all mixer video nodes. */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	.vidioc_expbuf = mxr_expbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV Timings functions */
	.vidioc_enum_dv_timings = mxr_enum_dv_timings,
	.vidioc_s_dv_timings = mxr_s_dv_timings,
	.vidioc_g_dv_timings = mxr_g_dv_timings,
	.vidioc_dv_timings_cap = mxr_dv_timings_cap,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};
/*
 * File open handler. Creates the v4l2 file handle and, for the first opener
 * of the layer only, powers up the device, initializes the vb2 queue and
 * resets the layer to its default format and geometry. Resources acquired
 * so far are unwound on failure via the goto chain.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);

	return ret;
}
  685. static unsigned int
  686. mxr_video_poll(struct file *file, struct poll_table_struct *wait)
  687. {
  688. struct mxr_layer *layer = video_drvdata(file);
  689. unsigned int res;
  690. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  691. mutex_lock(&layer->mutex);
  692. res = vb2_poll(&layer->vb_queue, file, wait);
  693. mutex_unlock(&layer->mutex);
  694. return res;
  695. }
  696. static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
  697. {
  698. struct mxr_layer *layer = video_drvdata(file);
  699. int ret;
  700. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  701. if (mutex_lock_interruptible(&layer->mutex))
  702. return -ERESTARTSYS;
  703. ret = vb2_mmap(&layer->vb_queue, vma);
  704. mutex_unlock(&layer->mutex);
  705. return ret;
  706. }
/*
 * File release handler. The last closer of the layer releases the vb2
 * queue and drops the power reference taken in mxr_video_open(); the file
 * handle itself is always released.
 */
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	mutex_lock(&layer->mutex);
	/* singular means this is the only remaining fh for the layer */
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	mutex_unlock(&layer->mutex);
	return 0;
}
/* File operations for the mixer video nodes. */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	/* all ioctls go through the table in mxr_ioctl_ops */
	.unlocked_ioctl = video_ioctl2,
};
/*
 * vb2 queue_setup op: report the number of planes, per-plane sizes and
 * allocator contexts for the layer's current format and source geometry.
 * Fails with -EINVAL when no format has been configured yet.
 */
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	/* compute plane sizes for the full source buffer */
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		/* all planes use the shared DMA-contig allocator */
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	/* guarantee at least one buffer */
	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}
  754. static void buf_queue(struct vb2_buffer *vb)
  755. {
  756. struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
  757. struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
  758. struct mxr_device *mdev = layer->mdev;
  759. unsigned long flags;
  760. spin_lock_irqsave(&layer->enq_slock, flags);
  761. list_add_tail(&buffer->list, &layer->enq_list);
  762. spin_unlock_irqrestore(&layer->enq_slock, flags);
  763. mxr_dbg(mdev, "queuing buffer\n");
  764. }
  765. static void wait_lock(struct vb2_queue *vq)
  766. {
  767. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  768. mxr_dbg(layer->mdev, "%s\n", __func__);
  769. mutex_lock(&layer->mutex);
  770. }
  771. static void wait_unlock(struct vb2_queue *vq)
  772. {
  773. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  774. mxr_dbg(layer->mdev, "%s\n", __func__);
  775. mutex_unlock(&layer->mutex);
  776. }
/*
 * vb2 start_streaming callback: bring the hardware pipeline up for this
 * layer.  The ordering below matters: output config is pinned first,
 * then format is programmed, then the layer is enabled and accounted as
 * an active streamer on the mixer.
 */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	/* refuse to start with nothing to display */
	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}
	/* block any changes in output configuration */
	mxr_output_get(mdev);
	/* push current output routing and pixel format to the hardware */
	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware; state change is published under the
	 * enqueue spinlock so the IRQ path sees a consistent value */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);
	return 0;
}
/*
 * Watchdog timer callback armed by stop_streaming(): if the hardware
 * fails to return the in-flight buffers within the timeout, complete
 * them here with an error status so vb2_wait_for_all_buffers() can
 * finish instead of blocking forever.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
	spin_lock_irqsave(&layer->enq_slock, flags);
	/* both slots may reference the same buffer; clear one so the
	 * buffer is reported done only once */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
/*
 * vb2 stop_streaming callback: flush all pending buffers, wait (with a
 * one-second watchdog) for the hardware to release in-flight ones, then
 * disable the layer and drop the streamer/output references taken in
 * start_streaming().
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);
	spin_lock_irqsave(&layer->enq_slock, flags);
	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;
	/* return every still-enqueued buffer to vb2 as errored */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	/* give the hardware 1 second to complete the last buffers;
	 * the timer lives on the stack, hence the _on_stack variants */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);
	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);
	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}
/*
 * videobuf2 queue callbacks.  Note wait_prepare/wait_finish are mapped
 * to unlock/lock respectively: vb2 drops the layer mutex around its
 * blocking waits and re-takes it afterwards.
 */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
/* FIXME: try to move these functions into mxr_base_layer_create */
  865. int mxr_base_layer_register(struct mxr_layer *layer)
  866. {
  867. struct mxr_device *mdev = layer->mdev;
  868. int ret;
  869. ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
  870. if (ret)
  871. mxr_err(mdev, "failed to register video device\n");
  872. else
  873. mxr_info(mdev, "registered layer %s as /dev/video%d\n",
  874. layer->vfd.name, layer->vfd.num);
  875. return ret;
  876. }
/* Unregister the layer's video device node from the V4L2 core. */
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}
  881. void mxr_layer_release(struct mxr_layer *layer)
  882. {
  883. if (layer->ops.release)
  884. layer->ops.release(layer);
  885. }
/* Free the layer object; counterpart of mxr_base_layer_create(). */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
/*
 * video_device release callback.  The embedding mxr_layer is freed
 * separately via mxr_base_layer_release(), so this only logs.
 */
static void mxr_vfd_release(struct video_device *vdev)
{
	pr_info("video device release\n");
}
  894. struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
  895. int idx, char *name, struct mxr_layer_ops *ops)
  896. {
  897. struct mxr_layer *layer;
  898. layer = kzalloc(sizeof(*layer), GFP_KERNEL);
  899. if (layer == NULL) {
  900. mxr_err(mdev, "not enough memory for layer.\n");
  901. goto fail;
  902. }
  903. layer->mdev = mdev;
  904. layer->idx = idx;
  905. layer->ops = *ops;
  906. spin_lock_init(&layer->enq_slock);
  907. INIT_LIST_HEAD(&layer->enq_list);
  908. mutex_init(&layer->mutex);
  909. layer->vfd = (struct video_device) {
  910. .minor = -1,
  911. .release = mxr_vfd_release,
  912. .fops = &mxr_fops,
  913. .vfl_dir = VFL_DIR_TX,
  914. .ioctl_ops = &mxr_ioctl_ops,
  915. };
  916. strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
  917. /* let framework control PRIORITY */
  918. set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
  919. video_set_drvdata(&layer->vfd, layer);
  920. layer->vfd.lock = &layer->mutex;
  921. layer->vfd.v4l2_dev = &mdev->v4l2_dev;
  922. layer->vb_queue = (struct vb2_queue) {
  923. .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
  924. .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
  925. .drv_priv = layer,
  926. .buf_struct_size = sizeof(struct mxr_buffer),
  927. .ops = &mxr_video_qops,
  928. .mem_ops = &vb2_dma_contig_memops,
  929. };
  930. return layer;
  931. fail:
  932. return NULL;
  933. }
  934. static const struct mxr_format *find_format_by_fourcc(
  935. struct mxr_layer *layer, unsigned long fourcc)
  936. {
  937. int i;
  938. for (i = 0; i < layer->fmt_array_size; ++i)
  939. if (layer->fmt_array[i]->fourcc == fourcc)
  940. return layer->fmt_array[i];
  941. return NULL;
  942. }
  943. static const struct mxr_format *find_format_by_index(
  944. struct mxr_layer *layer, unsigned long index)
  945. {
  946. if (index >= layer->fmt_array_size)
  947. return NULL;
  948. return layer->fmt_array[index];
  949. }