mixer_video.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007
  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #include "mixer.h"
  14. #include <media/v4l2-ioctl.h>
  15. #include <linux/videodev2.h>
  16. #include <linux/mm.h>
  17. #include <linux/module.h>
  18. #include <linux/version.h>
  19. #include <linux/timer.h>
  20. #include <media/videobuf2-dma-contig.h>
  21. static int find_reg_callback(struct device *dev, void *p)
  22. {
  23. struct v4l2_subdev **sd = p;
  24. *sd = dev_get_drvdata(dev);
  25. /* non-zero value stops iteration */
  26. return 1;
  27. }
/*
 * Look up the platform driver named @module_name, take the first subdev
 * exposed by any of its bound devices and register it with the mixer's
 * v4l2_device.  Returns the registered subdev, or NULL on any failure.
 */
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}
done:
	/* drop the driver reference taken by driver_find() */
	put_driver(drv);
	return sd;
}
  57. int __devinit mxr_acquire_video(struct mxr_device *mdev,
  58. struct mxr_output_conf *output_conf, int output_count)
  59. {
  60. struct device *dev = mdev->dev;
  61. struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
  62. int i;
  63. int ret = 0;
  64. struct v4l2_subdev *sd;
  65. strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
  66. /* prepare context for V4L2 device */
  67. ret = v4l2_device_register(dev, v4l2_dev);
  68. if (ret) {
  69. mxr_err(mdev, "could not register v4l2 device.\n");
  70. goto fail;
  71. }
  72. mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
  73. if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
  74. mxr_err(mdev, "could not acquire vb2 allocator\n");
  75. goto fail_v4l2_dev;
  76. }
  77. /* registering outputs */
  78. mdev->output_cnt = 0;
  79. for (i = 0; i < output_count; ++i) {
  80. struct mxr_output_conf *conf = &output_conf[i];
  81. struct mxr_output *out;
  82. sd = find_and_register_subdev(mdev, conf->module_name);
  83. /* trying to register next output */
  84. if (sd == NULL)
  85. continue;
  86. out = kzalloc(sizeof *out, GFP_KERNEL);
  87. if (out == NULL) {
  88. mxr_err(mdev, "no memory for '%s'\n",
  89. conf->output_name);
  90. ret = -ENOMEM;
  91. /* registered subdevs are removed in fail_v4l2_dev */
  92. goto fail_output;
  93. }
  94. strlcpy(out->name, conf->output_name, sizeof(out->name));
  95. out->sd = sd;
  96. out->cookie = conf->cookie;
  97. mdev->output[mdev->output_cnt++] = out;
  98. mxr_info(mdev, "added output '%s' from module '%s'\n",
  99. conf->output_name, conf->module_name);
  100. /* checking if maximal number of outputs is reached */
  101. if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
  102. break;
  103. }
  104. if (mdev->output_cnt == 0) {
  105. mxr_err(mdev, "failed to register any output\n");
  106. ret = -ENODEV;
  107. /* skipping fail_output because there is nothing to free */
  108. goto fail_vb2_allocator;
  109. }
  110. return 0;
  111. fail_output:
  112. /* kfree is NULL-safe */
  113. for (i = 0; i < mdev->output_cnt; ++i)
  114. kfree(mdev->output[i]);
  115. memset(mdev->output, 0, sizeof mdev->output);
  116. fail_vb2_allocator:
  117. /* freeing allocator context */
  118. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  119. fail_v4l2_dev:
  120. /* NOTE: automatically unregister all subdevs */
  121. v4l2_device_unregister(v4l2_dev);
  122. fail:
  123. return ret;
  124. }
  125. void __devexit mxr_release_video(struct mxr_device *mdev)
  126. {
  127. int i;
  128. /* kfree is NULL-safe */
  129. for (i = 0; i < mdev->output_cnt; ++i)
  130. kfree(mdev->output[i]);
  131. vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
  132. v4l2_device_unregister(&mdev->v4l2_dev);
  133. }
  134. static int mxr_querycap(struct file *file, void *priv,
  135. struct v4l2_capability *cap)
  136. {
  137. struct mxr_layer *layer = video_drvdata(file);
  138. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  139. strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
  140. strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
  141. sprintf(cap->bus_info, "%d", layer->idx);
  142. cap->version = KERNEL_VERSION(0, 1, 0);
  143. cap->capabilities = V4L2_CAP_STREAMING |
  144. V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  145. return 0;
  146. }
  147. /* Geometry handling */
/*
 * Re-synchronize the layer geometry with the current output mbus format
 * (destination full size and field), then let the hardware-specific
 * fix_geometry callback enforce device constraints.
 */
static void mxr_layer_geo_fix(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	/* TODO: add some dirty flag to avoid unnecessary adjustments */
	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer);
}
/*
 * Reset the layer geometry to defaults: both source and destination
 * rectangles cover the full current output frame, with zero offsets
 * (cleared by the memset).  Hardware constraints are applied last.
 */
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	layer->ops.fix_geometry(layer);
}
/* Dump the whole geometry (source/destination rectangles and scaling
 * ratios) to the debug log; no side effects. */
static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}
  193. static const struct mxr_format *find_format_by_fourcc(
  194. struct mxr_layer *layer, unsigned long fourcc);
  195. static const struct mxr_format *find_format_by_index(
  196. struct mxr_layer *layer, unsigned long index);
  197. static int mxr_enum_fmt(struct file *file, void *priv,
  198. struct v4l2_fmtdesc *f)
  199. {
  200. struct mxr_layer *layer = video_drvdata(file);
  201. struct mxr_device *mdev = layer->mdev;
  202. const struct mxr_format *fmt;
  203. mxr_dbg(mdev, "%s\n", __func__);
  204. fmt = find_format_by_index(layer, f->index);
  205. if (fmt == NULL)
  206. return -EINVAL;
  207. strlcpy(f->description, fmt->name, sizeof(f->description));
  208. f->pixelformat = fmt->fourcc;
  209. return 0;
  210. }
/*
 * VIDIOC_S_FMT handler: select the layer's pixel format and source
 * size.  The source crop is reset to cover the whole buffer and the
 * geometry is then re-validated against hardware constraints.
 */
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* source rectangle starts as the full buffer */
	geo->src.full_width = pix->width;
	geo->src.width = pix->width;
	geo->src.full_height = pix->height;
	geo->src.height = pix->height;
	/* assure consistency of geometry */
	mxr_layer_geo_fix(layer);
	mxr_dbg(mdev, "width=%u height=%u span=%u\n",
		geo->src.width, geo->src.height, geo->src.full_width);

	return 0;
}
  238. static unsigned int divup(unsigned int divident, unsigned int divisor)
  239. {
  240. return (divident + divisor - 1) / divisor;
  241. }
  242. unsigned long mxr_get_plane_size(const struct mxr_block *blk,
  243. unsigned int width, unsigned int height)
  244. {
  245. unsigned int bl_width = divup(width, blk->width);
  246. unsigned int bl_height = divup(height, blk->height);
  247. return bl_width * bl_height * blk->size;
  248. }
/*
 * Fill sizeimage/bytesperline for every subframe of @fmt at the given
 * resolution.  Multiple planes may map onto one subframe (see
 * plane2subframe), so sizeimage accumulates and bytesperline keeps the
 * maximum across the contributing planes.
 */
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* zero only the subframes actually used by this format */
	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		/* bytes per image line: one row of blocks spans blk->height lines */
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
/*
 * VIDIOC_G_FMT handler: report the currently configured multi-planar
 * format, deriving per-plane sizes from the source full resolution.
 */
static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}
  280. static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
  281. enum v4l2_buf_type type)
  282. {
  283. switch (type) {
  284. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  285. case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
  286. return &geo->dst;
  287. case V4L2_BUF_TYPE_VIDEO_OVERLAY:
  288. return &geo->src;
  289. default:
  290. return NULL;
  291. }
  292. }
/*
 * VIDIOC_G_CROP handler: return the crop rectangle selected by a->type
 * after re-validating the geometry against the current output.
 */
static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	/* refresh geometry before reporting it */
	mxr_layer_geo_fix(layer);
	a->c.left = crop->x_offset;
	a->c.top = crop->y_offset;
	a->c.width = crop->width;
	a->c.height = crop->height;

	return 0;
}
/*
 * VIDIOC_S_CROP handler: store the requested rectangle into the crop
 * selected by a->type, then let fix_geometry clamp it to hardware
 * limits (the user's values may be adjusted).
 */
static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	crop->x_offset = a->c.left;
	crop->y_offset = a->c.top;
	crop->width = a->c.width;
	crop->height = a->c.height;
	/* clamp the new rectangle to device constraints */
	mxr_layer_geo_fix(layer);

	return 0;
}
  323. static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
  324. {
  325. struct mxr_layer *layer = video_drvdata(file);
  326. struct mxr_crop *crop;
  327. mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
  328. crop = choose_crop_by_type(&layer->geo, a->type);
  329. if (crop == NULL)
  330. return -EINVAL;
  331. mxr_layer_geo_fix(layer);
  332. a->bounds.left = 0;
  333. a->bounds.top = 0;
  334. a->bounds.width = crop->full_width;
  335. a->bounds.top = crop->full_height;
  336. a->defrect = a->bounds;
  337. /* setting pixel aspect to 1/1 */
  338. a->pixelaspect.numerator = 1;
  339. a->pixelaspect.denominator = 1;
  340. return 0;
  341. }
/*
 * VIDIOC_ENUM_DV_PRESETS handler: forwarded to the current output
 * subdev.  Any subdev failure is reported as -EINVAL.
 */
static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
/*
 * VIDIOC_S_DV_PRESET handler: change the output preset via the current
 * output subdev.  Refused with -EBUSY while any entity depends on the
 * output configuration (n_output > 0).
 */
static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
/*
 * VIDIOC_G_DV_PRESET handler: read the current preset from the output
 * subdev.  Any subdev failure is reported as -EINVAL.
 */
static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
/*
 * VIDIOC_S_STD handler: select the analog TV standard on the output
 * subdev.  Refused with -EBUSY while any entity depends on the output
 * configuration (n_output > 0).
 */
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
/*
 * VIDIOC_G_STD handler: read the current analog TV standard from the
 * output subdev.  Any subdev failure is reported as -EINVAL.
 */
static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
/*
 * VIDIOC_ENUM_OUTPUT handler: describe the output at a->index, probing
 * the attached subdev for supported TV norms and preset/standard
 * capabilities.
 */
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	/* every slot below output_cnt must have been filled at probe time */
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
/*
 * VIDIOC_S_OUTPUT handler: switch the current output.  Refused with
 * -EBUSY while any entity depends on the output configuration; on
 * success the video device's tvnorms mask is refreshed from the newly
 * selected output subdev.
 */
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		ret = -EBUSY;
		goto done;
	}
	mdev->current_output = i;
	/* rebuild supported norms for the new output */
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

done:
	mutex_unlock(&mdev->mutex);

	return ret;
}
  459. static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
  460. {
  461. struct mxr_layer *layer = video_drvdata(file);
  462. struct mxr_device *mdev = layer->mdev;
  463. mutex_lock(&mdev->mutex);
  464. *p = mdev->current_output;
  465. mutex_unlock(&mdev->mutex);
  466. return 0;
  467. }
/*
 * The six handlers below are thin wrappers that delegate buffer and
 * streaming control straight to the videobuf2 core for this layer's
 * queue.
 */
static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	/* honour O_NONBLOCK: do not wait for a buffer if it was requested */
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}
/* V4L2 ioctl dispatch table shared by all mixer layer video nodes */
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* crop ioctls */
	.vidioc_g_crop = mxr_g_crop,
	.vidioc_s_crop = mxr_s_crop,
	.vidioc_cropcap = mxr_cropcap,
};
/*
 * Open handler for a layer's video node.  The first opener powers the
 * device up, initializes the vb2 queue and installs the default format
 * and geometry; subsequent openers only get a v4l2 file handle.
 */
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}
	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;
	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}
	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}
/* poll(): delegated to videobuf2 for the layer's queue. */
static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}

/* mmap(): delegated to videobuf2 for the layer's queue. */
static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}

/*
 * release(): the last closer tears down the vb2 queue and drops the
 * power reference taken in mxr_video_open(); every closer releases
 * its own v4l2 file handle.
 */
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}
/* file operations for the layer video nodes */
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
  606. static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
  607. unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
  608. void *alloc_ctxs[])
  609. {
  610. struct mxr_layer *layer = vb2_get_drv_priv(vq);
  611. const struct mxr_format *fmt = layer->fmt;
  612. int i;
  613. struct mxr_device *mdev = layer->mdev;
  614. struct v4l2_plane_pix_format planes[3];
  615. mxr_dbg(mdev, "%s\n", __func__);
  616. /* checking if format was configured */
  617. if (fmt == NULL)
  618. return -EINVAL;
  619. mxr_dbg(mdev, "fmt = %s\n", fmt->name);
  620. mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
  621. layer->geo.src.full_height);
  622. *nplanes = fmt->num_subframes;
  623. for (i = 0; i < fmt->num_subframes; ++i) {
  624. alloc_ctxs[i] = layer->mdev->alloc_ctx;
  625. sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
  626. mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
  627. }
  628. if (*nbuffers == 0)
  629. *nbuffers = 1;
  630. return 0;
  631. }
/*
 * vb2 buf_queue callback: append a ready buffer to the layer's enqueue
 * list under the spinlock.  The list is drained elsewhere (presumably
 * the mixer interrupt path — not visible in this file).
 */
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}
/* vb2 wait_finish callback: reacquire the layer mutex after sleeping. */
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

/* vb2 wait_prepare callback: release the layer mutex before sleeping. */
static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}
/*
 * vb2 start_streaming callback: lock the output configuration, refresh
 * and program the layer geometry/format, mark the layer as streaming
 * and enable it in hardware.  Requires at least one queued buffer.
 */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	/* update layers geometry */
	mxr_layer_geo_fix(layer);
	mxr_geometry_dump(mdev, &layer->geo);

	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}
/*
 * Timeout handler armed by stop_streaming(): if the hardware failed to
 * return the in-flight buffers within the grace period, force them to
 * the ERROR done-state so vb2_wait_for_all_buffers() can finish.
 */
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* when both pointers name the same buffer, complete it only once */
	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
/*
 * vb2 stop_streaming callback: flush every queued buffer with ERROR
 * status, wait (under a 1-second watchdog) for buffers still held by
 * the hardware, then disable the layer and release the streamer/output
 * references taken in start_streaming().
 */
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffer to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give the hardware 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);

	return 0;
}
/* videobuf2 queue operations for a mixer layer; note that wait_prepare
 * unlocks and wait_finish locks, matching the vb2 contract. */
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
  744. /* FIXME: try to put this functions to mxr_base_layer_create */
  745. int mxr_base_layer_register(struct mxr_layer *layer)
  746. {
  747. struct mxr_device *mdev = layer->mdev;
  748. int ret;
  749. ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
  750. if (ret)
  751. mxr_err(mdev, "failed to register video device\n");
  752. else
  753. mxr_info(mdev, "registered layer %s as /dev/video%d\n",
  754. layer->vfd.name, layer->vfd.num);
  755. return ret;
  756. }
/* Unregister the layer's video device node. */
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

/* Invoke the hardware-specific release hook, if one is installed. */
void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

/* Free the layer allocated by mxr_base_layer_create(). */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

/* video_device release callback; the layer owns the memory, so nothing
 * to free here — just log for diagnostics. */
static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}
/*
 * Allocate and initialize a mixer layer: its locks, buffer list,
 * video_device (named @name, index @idx) and vb2 queue template.
 * The caller-supplied @ops table is copied into the layer.
 * Returns the new layer or NULL on allocation failure.
 */
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	/* serialize ioctls through the layer mutex */
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}
  813. static const struct mxr_format *find_format_by_fourcc(
  814. struct mxr_layer *layer, unsigned long fourcc)
  815. {
  816. int i;
  817. for (i = 0; i < layer->fmt_array_size; ++i)
  818. if (layer->fmt_array[i]->fourcc == fourcc)
  819. return layer->fmt_array[i];
  820. return NULL;
  821. }
  822. static const struct mxr_format *find_format_by_index(
  823. struct mxr_layer *layer, unsigned long index)
  824. {
  825. if (index >= layer->fmt_array_size)
  826. return NULL;
  827. return layer->fmt_array[index];
  828. }