/*
 * dmxdev.c - DVB demultiplexer device
 *
 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
 *                    for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include "dmxdev.h"

static int debug;

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

#define dprintk if (debug) printk

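/*
 * dvb_dmxdev_buffer_write - copy demux output into a filter ring buffer.
 *
 * Called from the demux callbacks with the dmxdev spinlock held.  Returns
 * the number of bytes written, or -EOVERFLOW (leaving the buffer untouched)
 * if len does not fit into the free space of the ring buffer.
 */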
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
                                   const u8 *src, size_t len)
{
        ssize_t free;

        if (!len)
                return 0;
        if (!buf->data)
                return 0;

        free = dvb_ringbuffer_free(buf);
        if (len > free) {
                dprintk("dmxdev: buffer overflow\n");
                return -EOVERFLOW;
        }

        return dvb_ringbuffer_write(buf, src, len);
}

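/*
 * dvb_dmxdev_buffer_read - copy data from a ring buffer to user space.
 *
 * Blocks (interruptibly) until data or a buffer error is available, unless
 * non_blocking is set, in which case an empty buffer yields -EWOULDBLOCK.
 * A pending buffer error (e.g. -EOVERFLOW or -ETIMEDOUT) is returned once
 * and the buffer is flushed.  Returns the number of bytes copied, or a
 * negative error code if nothing could be copied.
 */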
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
                                      int non_blocking, char __user *buf,
                                      size_t count, loff_t *ppos)
{
        size_t todo;
        ssize_t avail;
        ssize_t ret = 0;

        if (!src->data)
                return 0;

        if (src->error) {
                ret = src->error;
                dvb_ringbuffer_flush(src);
                return ret;
        }

        for (todo = count; todo > 0; todo -= ret) {
                if (non_blocking && dvb_ringbuffer_empty(src)) {
                        ret = -EWOULDBLOCK;
                        break;
                }

                ret = wait_event_interruptible(src->queue,
                                               !dvb_ringbuffer_empty(src) ||
                                               (src->error != 0));
                if (ret < 0)
                        break;

                if (src->error) {
                        ret = src->error;
                        dvb_ringbuffer_flush(src);
                        break;
                }

                avail = dvb_ringbuffer_avail(src);
                if (avail > todo)
                        avail = todo;

                ret = dvb_ringbuffer_read_user(src, buf, avail);
                if (ret < 0)
                        break;

                buf += ret;
        }

        return (count - todo) ? (count - todo) : ret;
}

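/* look up the demux frontend of the given type (e.g. DMX_MEMORY_FE) */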
static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
{
        struct list_head *head, *pos;

        head = demux->get_frontends(demux);
        if (!head)
                return NULL;
        list_for_each(pos, head)
                if (DMX_FE_ENTRY(pos)->source == type)
                        return DMX_FE_ENTRY(pos);

        return NULL;
}

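/*
 * dvb_dvr_open - open the DVR device node.
 *
 * Opening read-only allocates the DVR capture ring buffer and claims the
 * single reader slot; opening write-only switches the demux over to its
 * memory frontend so a TS can be injected with write().  O_RDWR is only
 * allowed if the demux advertises DMXDEV_CAP_DUPLEX.
 *
 * Illustrative userspace use (error handling omitted): record one PID via
 * the DVR node by routing a PES filter to the TS tap and reading dvr0;
 * the PID value is just an example:
 *
 *      struct dmx_pes_filter_params p = {
 *              .pid      = 0x100,
 *              .input    = DMX_IN_FRONTEND,
 *              .output   = DMX_OUT_TS_TAP,
 *              .pes_type = DMX_PES_OTHER,
 *              .flags    = DMX_IMMEDIATE_START,
 *      };
 *      int demux = open("/dev/dvb/adapter0/demux0", O_RDWR);
 *      int dvr   = open("/dev/dvb/adapter0/dvr0", O_RDONLY);
 *      ioctl(demux, DMX_SET_PES_FILTER, &p);
 *      read(dvr, ts_buf, sizeof(ts_buf));
 */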
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        struct dmx_frontend *front;

        dprintk("function : %s\n", __func__);

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        if (dmxdev->exit) {
                mutex_unlock(&dmxdev->mutex);
                return -ENODEV;
        }

        if ((file->f_flags & O_ACCMODE) == O_RDWR) {
                if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
                }
        }

        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                void *mem;

                if (!dvbdev->readers) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EBUSY;
                }
                mem = vmalloc(DVR_BUFFER_SIZE);
                if (!mem) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ENOMEM;
                }
                dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
                dvbdev->readers--;
        }

        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

                if (!dmxdev->demux->write) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
                }

                front = get_fe(dmxdev->demux, DMX_MEMORY_FE);
                if (!front) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EINVAL;
                }
                dmxdev->demux->disconnect_frontend(dmxdev->demux);
                dmxdev->demux->connect_frontend(dmxdev->demux, front);
        }
        dvbdev->users++;
        mutex_unlock(&dmxdev->mutex);
        return 0;
}

static int dvb_dvr_release(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;

        mutex_lock(&dmxdev->mutex);

        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                dmxdev->demux->disconnect_frontend(dmxdev->demux);
                dmxdev->demux->connect_frontend(dmxdev->demux,
                                                dmxdev->dvr_orig_fe);
        }
        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                dvbdev->readers++;
                if (dmxdev->dvr_buffer.data) {
                        void *mem = dmxdev->dvr_buffer.data;
                        mb();
                        spin_lock_irq(&dmxdev->lock);
                        dmxdev->dvr_buffer.data = NULL;
                        spin_unlock_irq(&dmxdev->lock);
                        vfree(mem);
                }
        }
        /* TODO */
        dvbdev->users--;
        if (dvbdev->users == -1 && dmxdev->exit == 1) {
                fops_put(file->f_op);
                file->f_op = NULL;
                mutex_unlock(&dmxdev->mutex);
                wake_up(&dvbdev->wait_queue);
        } else
                mutex_unlock(&dmxdev->mutex);

        return 0;
}

static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int ret;

        if (!dmxdev->demux->write)
                return -EOPNOTSUPP;
        if ((file->f_flags & O_ACCMODE) != O_WRONLY)
                return -EINVAL;
        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        if (dmxdev->exit) {
                mutex_unlock(&dmxdev->mutex);
                return -ENODEV;
        }
        ret = dmxdev->demux->write(dmxdev->demux, buf, count);
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int ret;

        /*
         * dmxdev->mutex is deliberately not taken here (the buffer read
         * below may block), so the exit path must not unlock it either.
         */
        if (dmxdev->exit)
                return -ENODEV;

        ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
                                     file->f_flags & O_NONBLOCK,
                                     buf, count, ppos);
        return ret;
}

static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
                                   unsigned long size)
{
        struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
        void *newmem;
        void *oldmem;

        dprintk("function : %s\n", __func__);

        if (buf->size == size)
                return 0;
        if (!size)
                return -EINVAL;

        newmem = vmalloc(size);
        if (!newmem)
                return -ENOMEM;

        oldmem = buf->data;

        spin_lock_irq(&dmxdev->lock);
        buf->data = newmem;
        buf->size = size;

        /* reset and not flush in case the buffer shrinks */
        dvb_ringbuffer_reset(buf);
        spin_unlock_irq(&dmxdev->lock);

        vfree(oldmem);

        return 0;
}

static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
                                               *dmxdevfilter, int state)
{
        spin_lock_irq(&dmxdevfilter->dev->lock);
        dmxdevfilter->state = state;
        spin_unlock_irq(&dmxdevfilter->dev->lock);
}

static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
                                      unsigned long size)
{
        struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
        void *newmem;
        void *oldmem;

        if (buf->size == size)
                return 0;
        if (!size)
                return -EINVAL;
        if (dmxdevfilter->state >= DMXDEV_STATE_GO)
                return -EBUSY;

        newmem = vmalloc(size);
        if (!newmem)
                return -ENOMEM;

        oldmem = buf->data;

        spin_lock_irq(&dmxdevfilter->dev->lock);
        buf->data = newmem;
        buf->size = size;

        /* reset and not flush in case the buffer shrinks */
        dvb_ringbuffer_reset(buf);
        spin_unlock_irq(&dmxdevfilter->dev->lock);

        vfree(oldmem);

        return 0;
}

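/*
 * Section filter timeout handling: dvb_dmxdev_filter_timer (re)arms a timer
 * from the timeout field of the section filter parameters (milliseconds);
 * when it expires, dvb_dmxdev_filter_timeout marks the filter as timed out,
 * stores -ETIMEDOUT in the buffer and wakes up any waiting reader.
 */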
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
        struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;

        dmxdevfilter->buffer.error = -ETIMEDOUT;
        spin_lock_irq(&dmxdevfilter->dev->lock);
        dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
        spin_unlock_irq(&dmxdevfilter->dev->lock);
        wake_up(&dmxdevfilter->buffer.queue);
}

static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
{
        struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;

        del_timer(&dmxdevfilter->timer);
        if (para->timeout) {
                dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
                dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
                dmxdevfilter->timer.expires =
                        jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
                add_timer(&dmxdevfilter->timer);
        }
}

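/*
 * dvb_dmxdev_section_callback - demux core callback for section filters.
 *
 * Runs in demux context.  Copies the filtered section (possibly split across
 * buffer1/buffer2) into the filter ring buffer, records a buffer error on
 * overflow, marks one-shot filters as done and wakes up readers.
 */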
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
                                       const u8 *buffer2, size_t buffer2_len,
                                       struct dmx_section_filter *filter,
                                       enum dmx_success success)
{
        struct dmxdev_filter *dmxdevfilter = filter->priv;
        unsigned long flags;
        int ret;

        if (dmxdevfilter->buffer.error) {
                wake_up(&dmxdevfilter->buffer.queue);
                return 0;
        }
        spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
        if (dmxdevfilter->state != DMXDEV_STATE_GO) {
                spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
                return 0;
        }
        del_timer(&dmxdevfilter->timer);
        dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n",
                buffer1[0], buffer1[1],
                buffer1[2], buffer1[3], buffer1[4], buffer1[5]);
        ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
                                      buffer1_len);
        if (ret == buffer1_len) {
                ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
                                              buffer2_len);
        }
        if (ret < 0) {
                dvb_ringbuffer_flush(&dmxdevfilter->buffer);
                dmxdevfilter->buffer.error = ret;
        }
        if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
                dmxdevfilter->state = DMXDEV_STATE_DONE;
        spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
        wake_up(&dmxdevfilter->buffer.queue);
        return 0;
}

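/*
 * dvb_dmxdev_ts_callback - demux core callback for TS/PES filters.
 *
 * Depending on the requested output, nothing is buffered (DMX_OUT_DECODER),
 * the data goes to the per-filter buffer (DMX_OUT_TAP, DMX_OUT_TSDEMUX_TAP),
 * or it goes to the shared DVR buffer (DMX_OUT_TS_TAP).
 */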
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
                                  const u8 *buffer2, size_t buffer2_len,
                                  struct dmx_ts_feed *feed,
                                  enum dmx_success success)
{
        struct dmxdev_filter *dmxdevfilter = feed->priv;
        struct dvb_ringbuffer *buffer;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
        if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
                spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
                return 0;
        }
        if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
            || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
                buffer = &dmxdevfilter->buffer;
        else
                buffer = &dmxdevfilter->dev->dvr_buffer;
        if (buffer->error) {
                spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
                wake_up(&buffer->queue);
                return 0;
        }
        ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
        if (ret == buffer1_len)
                ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
        if (ret < 0) {
                dvb_ringbuffer_flush(buffer);
                buffer->error = ret;
        }
        spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
        wake_up(&buffer->queue);
        return 0;
}

/* stop feed but only mark the specified filter as stopped (state set) */
static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
{
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        switch (dmxdevfilter->type) {
        case DMXDEV_TYPE_SEC:
                del_timer(&dmxdevfilter->timer);
                dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
                break;
        case DMXDEV_TYPE_PES:
                dmxdevfilter->feed.ts->stop_filtering(dmxdevfilter->feed.ts);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/* start feed associated with the specified filter */
static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
{
        dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);

        switch (filter->type) {
        case DMXDEV_TYPE_SEC:
                return filter->feed.sec->start_filtering(filter->feed.sec);
        case DMXDEV_TYPE_PES:
                return filter->feed.ts->start_filtering(filter->feed.ts);
        default:
                return -EINVAL;
        }

        return 0;
}

/* restart section feed if it has filters left associated with it,
   otherwise release the feed */
static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
{
        int i;
        struct dmxdev *dmxdev = filter->dev;
        u16 pid = filter->params.sec.pid;

        for (i = 0; i < dmxdev->filternum; i++)
                if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
                    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
                    dmxdev->filter[i].params.sec.pid == pid) {
                        dvb_dmxdev_feed_start(&dmxdev->filter[i]);
                        return 0;
                }

        filter->dev->demux->release_section_feed(dmxdev->demux,
                                                 filter->feed.sec);

        return 0;
}

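/*
 * dvb_dmxdev_filter_stop - stop a running filter and release its feed.
 *
 * For section filters the underlying section feed may be shared between all
 * filters on the same PID, so the feed is only released (via
 * dvb_dmxdev_feed_restart) when no other active filter still uses it.
 */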
static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
        if (dmxdevfilter->state < DMXDEV_STATE_GO)
                return 0;

        switch (dmxdevfilter->type) {
        case DMXDEV_TYPE_SEC:
                if (!dmxdevfilter->feed.sec)
                        break;
                dvb_dmxdev_feed_stop(dmxdevfilter);
                if (dmxdevfilter->filter.sec)
                        dmxdevfilter->feed.sec->
                            release_filter(dmxdevfilter->feed.sec,
                                           dmxdevfilter->filter.sec);
                dvb_dmxdev_feed_restart(dmxdevfilter);
                dmxdevfilter->feed.sec = NULL;
                break;
        case DMXDEV_TYPE_PES:
                if (!dmxdevfilter->feed.ts)
                        break;
                dvb_dmxdev_feed_stop(dmxdevfilter);
                dmxdevfilter->dev->demux->
                    release_ts_feed(dmxdevfilter->dev->demux,
                                    dmxdevfilter->feed.ts);
                dmxdevfilter->feed.ts = NULL;
                break;
        default:
                if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
                        return 0;
                return -EINVAL;
        }

        dvb_ringbuffer_flush(&dmxdevfilter->buffer);
        return 0;
}

static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
{
        if (dmxdevfilter->state < DMXDEV_STATE_SET)
                return 0;

        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
        return 0;
}

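/*
 * dvb_dmxdev_filter_start - (re)start a filter that has been set up.
 *
 * Allocates the output ring buffer if needed, then either attaches a section
 * filter to a (possibly shared) section feed, or allocates and starts a TS
 * feed whose type bits are derived from the requested output.  On success
 * the filter enters DMXDEV_STATE_GO.
 */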
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
        struct dmxdev *dmxdev = filter->dev;
        void *mem;
        int ret, i;

        if (filter->state < DMXDEV_STATE_SET)
                return -EINVAL;

        if (filter->state >= DMXDEV_STATE_GO)
                dvb_dmxdev_filter_stop(filter);

        if (!filter->buffer.data) {
                mem = vmalloc(filter->buffer.size);
                if (!mem)
                        return -ENOMEM;
                spin_lock_irq(&filter->dev->lock);
                filter->buffer.data = mem;
                spin_unlock_irq(&filter->dev->lock);
        }

        dvb_ringbuffer_flush(&filter->buffer);

        switch (filter->type) {
        case DMXDEV_TYPE_SEC:
        {
                struct dmx_sct_filter_params *para = &filter->params.sec;
                struct dmx_section_filter **secfilter = &filter->filter.sec;
                struct dmx_section_feed **secfeed = &filter->feed.sec;

                *secfilter = NULL;
                *secfeed = NULL;

                /* find active filter/feed with same PID */
                for (i = 0; i < dmxdev->filternum; i++) {
                        if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
                            dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
                            dmxdev->filter[i].params.sec.pid == para->pid) {
                                *secfeed = dmxdev->filter[i].feed.sec;
                                break;
                        }
                }

                /* if no feed found, try to allocate new one */
                if (!*secfeed) {
                        ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
                                                                   secfeed,
                                                                   dvb_dmxdev_section_callback);
                        if (ret < 0) {
                                printk("DVB (%s): could not alloc feed\n",
                                       __func__);
                                return ret;
                        }

                        ret = (*secfeed)->set(*secfeed, para->pid, 32768,
                                              (para->flags & DMX_CHECK_CRC) ? 1 : 0);
                        if (ret < 0) {
                                printk("DVB (%s): could not set feed\n",
                                       __func__);
                                dvb_dmxdev_feed_restart(filter);
                                return ret;
                        }
                } else {
                        dvb_dmxdev_feed_stop(filter);
                }

                ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
                if (ret < 0) {
                        dvb_dmxdev_feed_restart(filter);
                        filter->feed.sec->start_filtering(*secfeed);
                        dprintk("could not get filter\n");
                        return ret;
                }

                (*secfilter)->priv = filter;

                memcpy(&((*secfilter)->filter_value[3]),
                       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
                memcpy(&(*secfilter)->filter_mask[3],
                       &para->filter.mask[1], DMX_FILTER_SIZE - 1);
                memcpy(&(*secfilter)->filter_mode[3],
                       &para->filter.mode[1], DMX_FILTER_SIZE - 1);

                (*secfilter)->filter_value[0] = para->filter.filter[0];
                (*secfilter)->filter_mask[0] = para->filter.mask[0];
                (*secfilter)->filter_mode[0] = para->filter.mode[0];
                (*secfilter)->filter_mask[1] = 0;
                (*secfilter)->filter_mask[2] = 0;

                filter->todo = 0;

                ret = filter->feed.sec->start_filtering(filter->feed.sec);
                if (ret < 0)
                        return ret;

                dvb_dmxdev_filter_timer(filter);
                break;
        }
        case DMXDEV_TYPE_PES:
        {
                struct timespec timeout = { 0 };
                struct dmx_pes_filter_params *para = &filter->params.pes;
                dmx_output_t otype;
                int ts_type;
                enum dmx_ts_pes ts_pes;
                struct dmx_ts_feed **tsfeed = &filter->feed.ts;

                filter->feed.ts = NULL;
                otype = para->output;

                ts_pes = (enum dmx_ts_pes)para->pes_type;

                if (ts_pes < DMX_PES_OTHER)
                        ts_type = TS_DECODER;
                else
                        ts_type = 0;

                if (otype == DMX_OUT_TS_TAP)
                        ts_type |= TS_PACKET;
                else if (otype == DMX_OUT_TSDEMUX_TAP)
                        ts_type |= TS_PACKET | TS_DEMUX;
                else if (otype == DMX_OUT_TAP)
                        ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;

                ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux,
                                                      tsfeed,
                                                      dvb_dmxdev_ts_callback);
                if (ret < 0)
                        return ret;

                (*tsfeed)->priv = filter;

                ret = (*tsfeed)->set(*tsfeed, para->pid, ts_type, ts_pes,
                                     32768, timeout);
                if (ret < 0) {
                        dmxdev->demux->release_ts_feed(dmxdev->demux,
                                                       *tsfeed);
                        return ret;
                }

                ret = filter->feed.ts->start_filtering(filter->feed.ts);
                if (ret < 0) {
                        dmxdev->demux->release_ts_feed(dmxdev->demux,
                                                       *tsfeed);
                        return ret;
                }

                break;
        }
        default:
                return -EINVAL;
        }

        dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
        return 0;
}

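/*
 * dvb_demux_open - open the demux device node.
 *
 * Grabs the first free slot in dmxdev->filter[] and ties it to this file
 * descriptor, so each open() owns exactly one filter.  Fails with -EMFILE
 * once all filternum slots are in use.
 */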
static int dvb_demux_open(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int i;
        struct dmxdev_filter *dmxdevfilter;

        if (!dmxdev->filter)
                return -EINVAL;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        for (i = 0; i < dmxdev->filternum; i++)
                if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
                        break;

        if (i == dmxdev->filternum) {
                mutex_unlock(&dmxdev->mutex);
                return -EMFILE;
        }

        dmxdevfilter = &dmxdev->filter[i];
        mutex_init(&dmxdevfilter->mutex);
        file->private_data = dmxdevfilter;

        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
        dmxdevfilter->feed.ts = NULL;
        init_timer(&dmxdevfilter->timer);

        dvbdev->users++;

        mutex_unlock(&dmxdev->mutex);
        return 0;
}

static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
                                  struct dmxdev_filter *dmxdevfilter)
{
        mutex_lock(&dmxdev->mutex);
        mutex_lock(&dmxdevfilter->mutex);

        dvb_dmxdev_filter_stop(dmxdevfilter);
        dvb_dmxdev_filter_reset(dmxdevfilter);

        if (dmxdevfilter->buffer.data) {
                void *mem = dmxdevfilter->buffer.data;

                spin_lock_irq(&dmxdev->lock);
                dmxdevfilter->buffer.data = NULL;
                spin_unlock_irq(&dmxdev->lock);
                vfree(mem);
        }

        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
        wake_up(&dmxdevfilter->buffer.queue);
        mutex_unlock(&dmxdevfilter->mutex);
        mutex_unlock(&dmxdev->mutex);
        return 0;
}

static inline void invert_mode(dmx_filter_t *filter)
{
        int i;

        for (i = 0; i < DMX_FILTER_SIZE; i++)
                filter->mode[i] ^= 0xff;
}

static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
                                 struct dmxdev_filter *dmxdevfilter,
                                 struct dmx_sct_filter_params *params)
{
        dprintk("function : %s\n", __func__);

        dvb_dmxdev_filter_stop(dmxdevfilter);

        dmxdevfilter->type = DMXDEV_TYPE_SEC;
        memcpy(&dmxdevfilter->params.sec,
               params, sizeof(struct dmx_sct_filter_params));
        invert_mode(&dmxdevfilter->params.sec.filter);
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        if (params->flags & DMX_IMMEDIATE_START)
                return dvb_dmxdev_filter_start(dmxdevfilter);

        return 0;
}

static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
                                     struct dmxdev_filter *dmxdevfilter,
                                     struct dmx_pes_filter_params *params)
{
        dvb_dmxdev_filter_stop(dmxdevfilter);

        if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
                return -EINVAL;

        dmxdevfilter->type = DMXDEV_TYPE_PES;
        memcpy(&dmxdevfilter->params, params,
               sizeof(struct dmx_pes_filter_params));

        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        if (params->flags & DMX_IMMEDIATE_START)
                return dvb_dmxdev_filter_start(dmxdevfilter);

        return 0;
}

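/*
 * dvb_dmxdev_read_sec - read() path for section filters.
 *
 * Sections are returned header-first: dfil->todo walks from 0 down to -3
 * while the three header bytes are handed out and stashed in
 * dfil->secheader, then it is set to section_length so that subsequent
 * reads return at most the remainder of the current section.
 */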
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
                                   struct file *file, char __user *buf,
                                   size_t count, loff_t *ppos)
{
        int result, hcount;
        int done = 0;

        if (dfil->todo <= 0) {
                hcount = 3 + dfil->todo;
                if (hcount > count)
                        hcount = count;
                result = dvb_dmxdev_buffer_read(&dfil->buffer,
                                                file->f_flags & O_NONBLOCK,
                                                buf, hcount, ppos);
                if (result < 0) {
                        dfil->todo = 0;
                        return result;
                }
                if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
                        return -EFAULT;
                buf += result;
                done = result;
                count -= result;
                dfil->todo -= result;
                if (dfil->todo > -3)
                        return done;
                dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
                if (!count)
                        return done;
        }
        if (count > dfil->todo)
                count = dfil->todo;
        result = dvb_dmxdev_buffer_read(&dfil->buffer,
                                        file->f_flags & O_NONBLOCK,
                                        buf, count, ppos);
        if (result < 0)
                return result;
        dfil->todo -= result;
        return (result + done);
}

static ssize_t
dvb_demux_read(struct file *file, char __user *buf, size_t count,
               loff_t *ppos)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        int ret;

        if (mutex_lock_interruptible(&dmxdevfilter->mutex))
                return -ERESTARTSYS;

        if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
                ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
        else
                ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
                                             file->f_flags & O_NONBLOCK,
                                             buf, count, ppos);

        mutex_unlock(&dmxdevfilter->mutex);
        return ret;
}

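/*
 * dvb_demux_do_ioctl - demux ioctl handler, called through dvb_usercopy,
 * so parg already points to a kernel copy of the user argument.
 *
 * Illustrative userspace use (error handling omitted): capture PAT sections
 * with a section filter on PID 0, table_id 0x00:
 *
 *      struct dmx_sct_filter_params f;
 *      memset(&f, 0, sizeof(f));
 *      f.pid              = 0x0000;
 *      f.filter.filter[0] = 0x00;   // table_id == PAT
 *      f.filter.mask[0]   = 0xff;
 *      f.timeout          = 5000;   // milliseconds
 *      f.flags            = DMX_IMMEDIATE_START | DMX_CHECK_CRC;
 *
 *      int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);
 *      ioctl(fd, DMX_SET_FILTER, &f);
 *      read(fd, section, sizeof(section));  // returns filtered section data
 */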
static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
                              unsigned int cmd, void *parg)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        unsigned long arg = (unsigned long)parg;
        int ret = 0;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        switch (cmd) {
        case DMX_START:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                if (dmxdevfilter->state < DMXDEV_STATE_SET)
                        ret = -EINVAL;
                else
                        ret = dvb_dmxdev_filter_start(dmxdevfilter);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_STOP:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_filter_stop(dmxdevfilter);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_FILTER:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_PES_FILTER:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_BUFFER_SIZE:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_GET_PES_PIDS:
                if (!dmxdev->demux->get_pes_pids) {
                        ret = -EINVAL;
                        break;
                }
                dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
                break;

        case DMX_GET_CAPS:
                if (!dmxdev->demux->get_caps) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
                break;

        case DMX_SET_SOURCE:
                if (!dmxdev->demux->set_source) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->set_source(dmxdev->demux, parg);
                break;

        case DMX_GET_STC:
                if (!dmxdev->demux->get_stc) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->get_stc(dmxdev->demux,
                                             ((struct dmx_stc *)parg)->num,
                                             &((struct dmx_stc *)parg)->stc,
                                             &((struct dmx_stc *)parg)->base);
                break;

        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static int dvb_demux_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
}

static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        unsigned int mask = 0;

        if (!dmxdevfilter)
                return -EINVAL;

        poll_wait(file, &dmxdevfilter->buffer.queue, wait);

        if (dmxdevfilter->state != DMXDEV_STATE_GO &&
            dmxdevfilter->state != DMXDEV_STATE_DONE &&
            dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
                return 0;

        if (dmxdevfilter->buffer.error)
                mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);

        if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
                mask |= (POLLIN | POLLRDNORM | POLLPRI);

        return mask;
}

static int dvb_demux_release(struct inode *inode, struct file *file)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        int ret;

        ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);

        mutex_lock(&dmxdev->mutex);
        dmxdev->dvbdev->users--;
        if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
                fops_put(file->f_op);
                file->f_op = NULL;
                mutex_unlock(&dmxdev->mutex);
                wake_up(&dmxdev->dvbdev->wait_queue);
        } else
                mutex_unlock(&dmxdev->mutex);

        return ret;
}

static struct file_operations dvb_demux_fops = {
        .owner = THIS_MODULE,
        .read = dvb_demux_read,
        .ioctl = dvb_demux_ioctl,
        .open = dvb_demux_open,
        .release = dvb_demux_release,
        .poll = dvb_demux_poll,
};

static struct dvb_device dvbdev_demux = {
        .priv = NULL,
        .users = 1,
        .writers = 1,
        .fops = &dvb_demux_fops
};

static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
                            unsigned int cmd, void *parg)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        unsigned long arg = (unsigned long)parg;
        int ret;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        switch (cmd) {
        case DMX_SET_BUFFER_SIZE:
                ret = dvb_dvr_set_buffer_size(dmxdev, arg);
                break;

        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
}

static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        unsigned int mask = 0;

        dprintk("function : %s\n", __func__);

        poll_wait(file, &dmxdev->dvr_buffer.queue, wait);

        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                if (dmxdev->dvr_buffer.error)
                        mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);

                if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
                        mask |= (POLLIN | POLLRDNORM | POLLPRI);
        } else
                mask |= (POLLOUT | POLLWRNORM | POLLPRI);

        return mask;
}

static struct file_operations dvb_dvr_fops = {
        .owner = THIS_MODULE,
        .read = dvb_dvr_read,
        .write = dvb_dvr_write,
        .ioctl = dvb_dvr_ioctl,
        .open = dvb_dvr_open,
        .release = dvb_dvr_release,
        .poll = dvb_dvr_poll,
};

static struct dvb_device dvbdev_dvr = {
        .priv = NULL,
        .readers = 1,
        .users = 1,
        .fops = &dvb_dvr_fops
};

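/*
 * dvb_dmxdev_init - register the demux and DVR device nodes for an adapter.
 *
 * The caller is expected to have set dmxdev->demux, dmxdev->filternum (and
 * the capability flags used by the DVR node) beforehand; this allocates the
 * filter table and registers the DVB_DEVICE_DEMUX and DVB_DEVICE_DVR nodes.
 */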
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
        int i;

        if (dmxdev->demux->open(dmxdev->demux) < 0)
                return -EUSERS;

        dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
        if (!dmxdev->filter)
                return -ENOMEM;

        mutex_init(&dmxdev->mutex);
        spin_lock_init(&dmxdev->lock);
        for (i = 0; i < dmxdev->filternum; i++) {
                dmxdev->filter[i].dev = dmxdev;
                dmxdev->filter[i].buffer.data = NULL;
                dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
                                            DMXDEV_STATE_FREE);
        }

        dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
                            DVB_DEVICE_DEMUX);
        dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
                            dmxdev, DVB_DEVICE_DVR);

        dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);

        return 0;
}
EXPORT_SYMBOL(dvb_dmxdev_init);

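/*
 * dvb_dmxdev_release - tear down the demux and DVR devices.
 *
 * Sets the exit flag, waits until the last demux/DVR user has closed its
 * file descriptor (the release handlers wake these wait queues), then
 * unregisters both device nodes and frees the filter table.
 */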
void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
        dmxdev->exit = 1;

        if (dmxdev->dvbdev->users > 1) {
                wait_event(dmxdev->dvbdev->wait_queue,
                           dmxdev->dvbdev->users == 1);
        }
        if (dmxdev->dvr_dvbdev->users > 1) {
                wait_event(dmxdev->dvr_dvbdev->wait_queue,
                           dmxdev->dvr_dvbdev->users == 1);
        }

        dvb_unregister_device(dmxdev->dvbdev);
        dvb_unregister_device(dmxdev->dvr_dvbdev);

        vfree(dmxdev->filter);
        dmxdev->filter = NULL;
        dmxdev->demux->close(dmxdev->demux);
}
EXPORT_SYMBOL(dvb_dmxdev_release);