dmxdev.c

/*
 * dmxdev.c - DVB demultiplexer device
 *
 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
 *                    for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include "dmxdev.h"

static int debug;

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

#define dprintk if (debug) printk

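/* Write @len bytes into a dmxdev ring buffer.  Returns the number of bytes
 * written, 0 if the buffer is not allocated or @len is 0, and -EOVERFLOW if
 * there is not enough free space. */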
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
                                   const u8 *src, size_t len)
{
        ssize_t free;

        if (!len)
                return 0;
        if (!buf->data)
                return 0;

        free = dvb_ringbuffer_free(buf);
        if (len > free) {
                dprintk("dmxdev: buffer overflow\n");
                return -EOVERFLOW;
        }

        return dvb_ringbuffer_write(buf, src, len);
}

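/* Copy up to @count bytes from a ring buffer to user space.  Unless
 * @non_blocking is set, the reader sleeps until data or a buffer error
 * arrives; a pending error is returned once and the buffer is flushed. */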
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
                                      int non_blocking, char __user *buf,
                                      size_t count, loff_t *ppos)
{
        size_t todo;
        ssize_t avail;
        ssize_t ret = 0;

        if (!src->data)
                return 0;

        if (src->error) {
                ret = src->error;
                dvb_ringbuffer_flush(src);
                return ret;
        }

        for (todo = count; todo > 0; todo -= ret) {
                if (non_blocking && dvb_ringbuffer_empty(src)) {
                        ret = -EWOULDBLOCK;
                        break;
                }

                ret = wait_event_interruptible(src->queue,
                                               !dvb_ringbuffer_empty(src) ||
                                               (src->error != 0));
                if (ret < 0)
                        break;

                if (src->error) {
                        ret = src->error;
                        dvb_ringbuffer_flush(src);
                        break;
                }

                avail = dvb_ringbuffer_avail(src);
                if (avail > todo)
                        avail = todo;

                ret = dvb_ringbuffer_read_user(src, buf, avail);
                if (ret < 0)
                        break;

                buf += ret;
        }

        return (count - todo) ? (count - todo) : ret;
}

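/* Find the demux frontend of the given source @type (e.g. DMX_MEMORY_FE)
 * in the frontend list provided by the demux driver. */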
static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
{
        struct list_head *head, *pos;

        head = demux->get_frontends(demux);
        if (!head)
                return NULL;
        list_for_each(pos, head)
                if (DMX_FE_ENTRY(pos)->source == type)
                        return DMX_FE_ENTRY(pos);

        return NULL;
}

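/* Open the DVR device.  Readers allocate the DVR ring buffer; writers switch
 * the demux to the memory frontend so that TS data written to the device is
 * fed back into the demux. */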
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        struct dmx_frontend *front;

        dprintk("function : %s\n", __func__);

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        if (dmxdev->exit) {
                mutex_unlock(&dmxdev->mutex);
                return -ENODEV;
        }

        if ((file->f_flags & O_ACCMODE) == O_RDWR) {
                if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
                }
        }

        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                void *mem;
                if (!dvbdev->readers) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EBUSY;
                }
                mem = vmalloc(DVR_BUFFER_SIZE);
                if (!mem) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ENOMEM;
                }
                dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
                dvbdev->readers--;
        }

        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

                if (!dmxdev->demux->write) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
                }

                front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

                if (!front) {
                        mutex_unlock(&dmxdev->mutex);
                        return -EINVAL;
                }
                dmxdev->demux->disconnect_frontend(dmxdev->demux);
                dmxdev->demux->connect_frontend(dmxdev->demux, front);
        }
        dvbdev->users++;
        mutex_unlock(&dmxdev->mutex);
        return 0;
}

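/* Release the DVR device: restore the original frontend for writers, free
 * the DVR ring buffer for readers, and wake up dvb_dmxdev_release() if it
 * is waiting for the last user to go away. */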
static int dvb_dvr_release(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;

        mutex_lock(&dmxdev->mutex);

        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                dmxdev->demux->disconnect_frontend(dmxdev->demux);
                dmxdev->demux->connect_frontend(dmxdev->demux,
                                                dmxdev->dvr_orig_fe);
        }
        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                dvbdev->readers++;
                if (dmxdev->dvr_buffer.data) {
                        void *mem = dmxdev->dvr_buffer.data;
                        mb();
                        spin_lock_irq(&dmxdev->lock);
                        dmxdev->dvr_buffer.data = NULL;
                        spin_unlock_irq(&dmxdev->lock);
                        vfree(mem);
                }
        }
        /* TODO */
        dvbdev->users--;
        if (dvbdev->users == -1 && dmxdev->exit == 1) {
                fops_put(file->f_op);
                file->f_op = NULL;
                mutex_unlock(&dmxdev->mutex);
                wake_up(&dvbdev->wait_queue);
        } else
                mutex_unlock(&dmxdev->mutex);

        return 0;
}

static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int ret;

        if (!dmxdev->demux->write)
                return -EOPNOTSUPP;
        if ((file->f_flags & O_ACCMODE) != O_WRONLY)
                return -EINVAL;
        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        if (dmxdev->exit) {
                mutex_unlock(&dmxdev->mutex);
                return -ENODEV;
        }
        ret = dmxdev->demux->write(dmxdev->demux, buf, count);
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;

        if (dmxdev->exit)
                return -ENODEV;

        return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
                                      file->f_flags & O_NONBLOCK,
                                      buf, count, ppos);
}

static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
                                   unsigned long size)
{
        struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
        void *newmem;
        void *oldmem;

        dprintk("function : %s\n", __func__);

        if (buf->size == size)
                return 0;
        if (!size)
                return -EINVAL;

        newmem = vmalloc(size);
        if (!newmem)
                return -ENOMEM;

        oldmem = buf->data;

        spin_lock_irq(&dmxdev->lock);
        buf->data = newmem;
        buf->size = size;

        /* reset and not flush in case the buffer shrinks */
        dvb_ringbuffer_reset(buf);
        spin_unlock_irq(&dmxdev->lock);

        vfree(oldmem);

        return 0;
}

static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
                                               *dmxdevfilter, int state)
{
        spin_lock_irq(&dmxdevfilter->dev->lock);
        dmxdevfilter->state = state;
        spin_unlock_irq(&dmxdevfilter->dev->lock);
}

static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
                                      unsigned long size)
{
        struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
        void *newmem;
        void *oldmem;

        if (buf->size == size)
                return 0;
        if (!size)
                return -EINVAL;
        if (dmxdevfilter->state >= DMXDEV_STATE_GO)
                return -EBUSY;

        newmem = vmalloc(size);
        if (!newmem)
                return -ENOMEM;

        oldmem = buf->data;

        spin_lock_irq(&dmxdevfilter->dev->lock);
        buf->data = newmem;
        buf->size = size;

        /* reset and not flush in case the buffer shrinks */
        dvb_ringbuffer_reset(buf);
        spin_unlock_irq(&dmxdevfilter->dev->lock);

        vfree(oldmem);

        return 0;
}

static void dvb_dmxdev_filter_timeout(unsigned long data)
{
        struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;

        dmxdevfilter->buffer.error = -ETIMEDOUT;
        spin_lock_irq(&dmxdevfilter->dev->lock);
        dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
        spin_unlock_irq(&dmxdevfilter->dev->lock);
        wake_up(&dmxdevfilter->buffer.queue);
}

static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
{
        struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;

        del_timer(&dmxdevfilter->timer);
        if (para->timeout) {
                dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
                dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
                dmxdevfilter->timer.expires =
                    jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
                add_timer(&dmxdevfilter->timer);
        }
}

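/* Demux callback for section filters: copy the section data into the
 * filter's ring buffer and wake up readers.  A failed write is latched as a
 * buffer error; DMX_ONESHOT filters are marked done after the first
 * section. */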
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
                                       const u8 *buffer2, size_t buffer2_len,
                                       struct dmx_section_filter *filter,
                                       enum dmx_success success)
{
        struct dmxdev_filter *dmxdevfilter = filter->priv;
        int ret;

        if (dmxdevfilter->buffer.error) {
                wake_up(&dmxdevfilter->buffer.queue);
                return 0;
        }
        spin_lock(&dmxdevfilter->dev->lock);
        if (dmxdevfilter->state != DMXDEV_STATE_GO) {
                spin_unlock(&dmxdevfilter->dev->lock);
                return 0;
        }
        del_timer(&dmxdevfilter->timer);
        dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n",
                buffer1[0], buffer1[1],
                buffer1[2], buffer1[3], buffer1[4], buffer1[5]);
        ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
                                      buffer1_len);
        if (ret == buffer1_len) {
                ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
                                              buffer2_len);
        }
        if (ret < 0) {
                dvb_ringbuffer_flush(&dmxdevfilter->buffer);
                dmxdevfilter->buffer.error = ret;
        }
        if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
                dmxdevfilter->state = DMXDEV_STATE_DONE;
        spin_unlock(&dmxdevfilter->dev->lock);
        wake_up(&dmxdevfilter->buffer.queue);
        return 0;
}

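/* Demux callback for TS feeds: route packets either to the filter's own
 * buffer (DMX_OUT_TAP, DMX_OUT_TSDEMUX_TAP) or to the shared DVR buffer,
 * then wake up the corresponding reader. */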
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
                                  const u8 *buffer2, size_t buffer2_len,
                                  struct dmx_ts_feed *feed,
                                  enum dmx_success success)
{
        struct dmxdev_filter *dmxdevfilter = feed->priv;
        struct dvb_ringbuffer *buffer;
        int ret;

        spin_lock(&dmxdevfilter->dev->lock);
        if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
                spin_unlock(&dmxdevfilter->dev->lock);
                return 0;
        }

        if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
            || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
                buffer = &dmxdevfilter->buffer;
        else
                buffer = &dmxdevfilter->dev->dvr_buffer;
        if (buffer->error) {
                spin_unlock(&dmxdevfilter->dev->lock);
                wake_up(&buffer->queue);
                return 0;
        }
        ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
        if (ret == buffer1_len)
                ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
        if (ret < 0) {
                dvb_ringbuffer_flush(buffer);
                buffer->error = ret;
        }
        spin_unlock(&dmxdevfilter->dev->lock);
        wake_up(&buffer->queue);
        return 0;
}

/* stop feed but only mark the specified filter as stopped (state set) */
static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
{
        struct dmxdev_feed *feed;

        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        switch (dmxdevfilter->type) {
        case DMXDEV_TYPE_SEC:
                del_timer(&dmxdevfilter->timer);
                dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
                break;
        case DMXDEV_TYPE_PES:
                list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
                        feed->ts->stop_filtering(feed->ts);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/* start feed associated with the specified filter */
static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
{
        struct dmxdev_feed *feed;
        int ret;

        dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);

        switch (filter->type) {
        case DMXDEV_TYPE_SEC:
                return filter->feed.sec->start_filtering(filter->feed.sec);
        case DMXDEV_TYPE_PES:
                list_for_each_entry(feed, &filter->feed.ts, next) {
                        ret = feed->ts->start_filtering(feed->ts);
                        if (ret < 0) {
                                dvb_dmxdev_feed_stop(filter);
                                return ret;
                        }
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* restart section feed if it has filters left associated with it,
   otherwise release the feed */
static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
{
        int i;
        struct dmxdev *dmxdev = filter->dev;
        u16 pid = filter->params.sec.pid;

        for (i = 0; i < dmxdev->filternum; i++)
                if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
                    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
                    dmxdev->filter[i].params.sec.pid == pid) {
                        dvb_dmxdev_feed_start(&dmxdev->filter[i]);
                        return 0;
                }

        filter->dev->demux->release_section_feed(dmxdev->demux,
                                                 filter->feed.sec);

        return 0;
}

static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
        struct dmxdev_feed *feed;
        struct dmx_demux *demux;

        if (dmxdevfilter->state < DMXDEV_STATE_GO)
                return 0;

        switch (dmxdevfilter->type) {
        case DMXDEV_TYPE_SEC:
                if (!dmxdevfilter->feed.sec)
                        break;
                dvb_dmxdev_feed_stop(dmxdevfilter);
                if (dmxdevfilter->filter.sec)
                        dmxdevfilter->feed.sec->
                            release_filter(dmxdevfilter->feed.sec,
                                           dmxdevfilter->filter.sec);
                dvb_dmxdev_feed_restart(dmxdevfilter);
                dmxdevfilter->feed.sec = NULL;
                break;
        case DMXDEV_TYPE_PES:
                dvb_dmxdev_feed_stop(dmxdevfilter);
                demux = dmxdevfilter->dev->demux;
                list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
                        demux->release_ts_feed(demux, feed->ts);
                        feed->ts = NULL;
                }
                break;
        default:
                if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
                        return 0;
                return -EINVAL;
        }

        dvb_ringbuffer_flush(&dmxdevfilter->buffer);
        return 0;
}

static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
{
        struct dmxdev_feed *feed, *tmp;

        /* delete all PIDs */
        list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
                list_del(&feed->next);
                kfree(feed);
        }

        BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
}

static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
{
        if (dmxdevfilter->state < DMXDEV_STATE_SET)
                return 0;

        if (dmxdevfilter->type == DMXDEV_TYPE_PES)
                dvb_dmxdev_delete_pids(dmxdevfilter);

        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
        return 0;
}

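/* Allocate, configure and start a TS feed for a single PID of a PES filter.
 * The requested output type selects the TS_PACKET/TS_DEMUX/TS_PAYLOAD_ONLY
 * flags passed to the demux driver. */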
static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
                                 struct dmxdev_filter *filter,
                                 struct dmxdev_feed *feed)
{
        struct timespec timeout = { 0 };
        struct dmx_pes_filter_params *para = &filter->params.pes;
        dmx_output_t otype;
        int ret;
        int ts_type;
        enum dmx_ts_pes ts_pes;
        struct dmx_ts_feed *tsfeed;

        feed->ts = NULL;
        otype = para->output;

        ts_pes = (enum dmx_ts_pes)para->pes_type;

        if (ts_pes < DMX_PES_OTHER)
                ts_type = TS_DECODER;
        else
                ts_type = 0;

        if (otype == DMX_OUT_TS_TAP)
                ts_type |= TS_PACKET;
        else if (otype == DMX_OUT_TSDEMUX_TAP)
                ts_type |= TS_PACKET | TS_DEMUX;
        else if (otype == DMX_OUT_TAP)
                ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;

        ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
                                              dvb_dmxdev_ts_callback);
        if (ret < 0)
                return ret;

        tsfeed = feed->ts;
        tsfeed->priv = filter;

        ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
        if (ret < 0) {
                dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
                return ret;
        }

        ret = tsfeed->start_filtering(tsfeed);
        if (ret < 0) {
                dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
                return ret;
        }

        return 0;
}

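/* Start a configured filter: allocate its ring buffer if necessary, then set
 * up either a section feed/filter (reusing an active feed on the same PID if
 * one exists) or the TS feeds for all PIDs of a PES filter. */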
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
        struct dmxdev *dmxdev = filter->dev;
        struct dmxdev_feed *feed;
        void *mem;
        int ret, i;

        if (filter->state < DMXDEV_STATE_SET)
                return -EINVAL;

        if (filter->state >= DMXDEV_STATE_GO)
                dvb_dmxdev_filter_stop(filter);

        if (!filter->buffer.data) {
                mem = vmalloc(filter->buffer.size);
                if (!mem)
                        return -ENOMEM;
                spin_lock_irq(&filter->dev->lock);
                filter->buffer.data = mem;
                spin_unlock_irq(&filter->dev->lock);
        }

        dvb_ringbuffer_flush(&filter->buffer);

        switch (filter->type) {
        case DMXDEV_TYPE_SEC:
        {
                struct dmx_sct_filter_params *para = &filter->params.sec;
                struct dmx_section_filter **secfilter = &filter->filter.sec;
                struct dmx_section_feed **secfeed = &filter->feed.sec;

                *secfilter = NULL;
                *secfeed = NULL;

                /* find active filter/feed with same PID */
                for (i = 0; i < dmxdev->filternum; i++) {
                        if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
                            dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
                            dmxdev->filter[i].params.sec.pid == para->pid) {
                                *secfeed = dmxdev->filter[i].feed.sec;
                                break;
                        }
                }

                /* if no feed found, try to allocate new one */
                if (!*secfeed) {
                        ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
                                                                   secfeed,
                                                                   dvb_dmxdev_section_callback);
                        if (ret < 0) {
                                printk("DVB (%s): could not alloc feed\n",
                                       __func__);
                                return ret;
                        }

                        ret = (*secfeed)->set(*secfeed, para->pid, 32768,
                                              (para->flags & DMX_CHECK_CRC) ? 1 : 0);
                        if (ret < 0) {
                                printk("DVB (%s): could not set feed\n",
                                       __func__);
                                dvb_dmxdev_feed_restart(filter);
                                return ret;
                        }
                } else {
                        dvb_dmxdev_feed_stop(filter);
                }

                ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
                if (ret < 0) {
                        dvb_dmxdev_feed_restart(filter);
                        filter->feed.sec->start_filtering(*secfeed);
                        dprintk("could not get filter\n");
                        return ret;
                }

                (*secfilter)->priv = filter;

                memcpy(&((*secfilter)->filter_value[3]),
                       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
                memcpy(&(*secfilter)->filter_mask[3],
                       &para->filter.mask[1], DMX_FILTER_SIZE - 1);
                memcpy(&(*secfilter)->filter_mode[3],
                       &para->filter.mode[1], DMX_FILTER_SIZE - 1);

                (*secfilter)->filter_value[0] = para->filter.filter[0];
                (*secfilter)->filter_mask[0] = para->filter.mask[0];
                (*secfilter)->filter_mode[0] = para->filter.mode[0];
                (*secfilter)->filter_mask[1] = 0;
                (*secfilter)->filter_mask[2] = 0;

                filter->todo = 0;

                ret = filter->feed.sec->start_filtering(filter->feed.sec);
                if (ret < 0)
                        return ret;

                dvb_dmxdev_filter_timer(filter);
                break;
        }
        case DMXDEV_TYPE_PES:
                list_for_each_entry(feed, &filter->feed.ts, next) {
                        ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
                        if (ret < 0) {
                                dvb_dmxdev_filter_stop(filter);
                                return ret;
                        }
                }
                break;
        default:
                return -EINVAL;
        }

        dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
        return 0;
}

static int dvb_demux_open(struct inode *inode, struct file *file)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int i;
        struct dmxdev_filter *dmxdevfilter;

        if (!dmxdev->filter)
                return -EINVAL;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        for (i = 0; i < dmxdev->filternum; i++)
                if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
                        break;

        if (i == dmxdev->filternum) {
                mutex_unlock(&dmxdev->mutex);
                return -EMFILE;
        }

        dmxdevfilter = &dmxdev->filter[i];
        mutex_init(&dmxdevfilter->mutex);
        file->private_data = dmxdevfilter;

        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dmxdevfilter->type = DMXDEV_TYPE_NONE;
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
        INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
        init_timer(&dmxdevfilter->timer);

        dvbdev->users++;

        mutex_unlock(&dmxdev->mutex);
        return 0;
}

static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
                                  struct dmxdev_filter *dmxdevfilter)
{
        mutex_lock(&dmxdev->mutex);
        mutex_lock(&dmxdevfilter->mutex);

        dvb_dmxdev_filter_stop(dmxdevfilter);
        dvb_dmxdev_filter_reset(dmxdevfilter);

        if (dmxdevfilter->buffer.data) {
                void *mem = dmxdevfilter->buffer.data;

                spin_lock_irq(&dmxdev->lock);
                dmxdevfilter->buffer.data = NULL;
                spin_unlock_irq(&dmxdev->lock);
                vfree(mem);
        }

        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
        wake_up(&dmxdevfilter->buffer.queue);
        mutex_unlock(&dmxdevfilter->mutex);
        mutex_unlock(&dmxdev->mutex);
        return 0;
}

static inline void invert_mode(dmx_filter_t *filter)
{
        int i;

        for (i = 0; i < DMX_FILTER_SIZE; i++)
                filter->mode[i] ^= 0xff;
}

static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
                              struct dmxdev_filter *filter, u16 pid)
{
        struct dmxdev_feed *feed;

        if ((filter->type != DMXDEV_TYPE_PES) ||
            (filter->state < DMXDEV_STATE_SET))
                return -EINVAL;

        /* only TS packet filters may have multiple PIDs */
        if ((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) &&
            (!list_empty(&filter->feed.ts)))
                return -EINVAL;

        feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL);
        if (feed == NULL)
                return -ENOMEM;

        feed->pid = pid;
        list_add(&feed->next, &filter->feed.ts);

        if (filter->state >= DMXDEV_STATE_GO)
                return dvb_dmxdev_start_feed(dmxdev, filter, feed);

        return 0;
}

static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
                                 struct dmxdev_filter *filter, u16 pid)
{
        struct dmxdev_feed *feed, *tmp;

        if ((filter->type != DMXDEV_TYPE_PES) ||
            (filter->state < DMXDEV_STATE_SET))
                return -EINVAL;

        list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
                if ((feed->pid == pid) && (feed->ts != NULL)) {
                        feed->ts->stop_filtering(feed->ts);
                        filter->dev->demux->release_ts_feed(filter->dev->demux,
                                                            feed->ts);
                        list_del(&feed->next);
                        kfree(feed);
                }
        }

        return 0;
}

static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
                                 struct dmxdev_filter *dmxdevfilter,
                                 struct dmx_sct_filter_params *params)
{
        dprintk("function : %s\n", __func__);

        dvb_dmxdev_filter_stop(dmxdevfilter);

        dmxdevfilter->type = DMXDEV_TYPE_SEC;
        memcpy(&dmxdevfilter->params.sec,
               params, sizeof(struct dmx_sct_filter_params));
        invert_mode(&dmxdevfilter->params.sec.filter);
        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        if (params->flags & DMX_IMMEDIATE_START)
                return dvb_dmxdev_filter_start(dmxdevfilter);

        return 0;
}

static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
                                     struct dmxdev_filter *dmxdevfilter,
                                     struct dmx_pes_filter_params *params)
{
        int ret;

        dvb_dmxdev_filter_stop(dmxdevfilter);
        dvb_dmxdev_filter_reset(dmxdevfilter);

        if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
                return -EINVAL;

        dmxdevfilter->type = DMXDEV_TYPE_PES;
        memcpy(&dmxdevfilter->params, params,
               sizeof(struct dmx_pes_filter_params));

        dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

        ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
                                 dmxdevfilter->params.pes.pid);
        if (ret < 0)
                return ret;

        if (params->flags & DMX_IMMEDIATE_START)
                return dvb_dmxdev_filter_start(dmxdevfilter);

        return 0;
}

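/* Read one section from a section filter.  dfil->todo tracks the state of
 * the current section: while it is <= 0 the 3-byte section header is still
 * being collected (and mirrored into dfil->secheader so the section length
 * can be parsed); afterwards it counts the remaining payload bytes. */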
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
                                   struct file *file, char __user *buf,
                                   size_t count, loff_t *ppos)
{
        int result, hcount;
        int done = 0;

        if (dfil->todo <= 0) {
                hcount = 3 + dfil->todo;
                if (hcount > count)
                        hcount = count;
                result = dvb_dmxdev_buffer_read(&dfil->buffer,
                                                file->f_flags & O_NONBLOCK,
                                                buf, hcount, ppos);
                if (result < 0) {
                        dfil->todo = 0;
                        return result;
                }
                if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
                        return -EFAULT;
                buf += result;
                done = result;
                count -= result;
                dfil->todo -= result;
                if (dfil->todo > -3)
                        return done;
                dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
                if (!count)
                        return done;
        }
        if (count > dfil->todo)
                count = dfil->todo;
        result = dvb_dmxdev_buffer_read(&dfil->buffer,
                                        file->f_flags & O_NONBLOCK,
                                        buf, count, ppos);
        if (result < 0)
                return result;
        dfil->todo -= result;
        return (result + done);
}

static ssize_t
dvb_demux_read(struct file *file, char __user *buf, size_t count,
               loff_t *ppos)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        int ret;

        if (mutex_lock_interruptible(&dmxdevfilter->mutex))
                return -ERESTARTSYS;

        if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
                ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
        else
                ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
                                             file->f_flags & O_NONBLOCK,
                                             buf, count, ppos);

        mutex_unlock(&dmxdevfilter->mutex);
        return ret;
}

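/* Demux ioctl handler, called via dvb_usercopy() with the argument already
 * copied into kernel space (parg).  Commands that change filter state also
 * take the per-filter mutex. */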
static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
                              unsigned int cmd, void *parg)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        unsigned long arg = (unsigned long)parg;
        int ret = 0;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        switch (cmd) {
        case DMX_START:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                if (dmxdevfilter->state < DMXDEV_STATE_SET)
                        ret = -EINVAL;
                else
                        ret = dvb_dmxdev_filter_start(dmxdevfilter);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_STOP:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_filter_stop(dmxdevfilter);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_FILTER:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_PES_FILTER:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_SET_BUFFER_SIZE:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
                        return -ERESTARTSYS;
                }
                ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_GET_PES_PIDS:
                if (!dmxdev->demux->get_pes_pids) {
                        ret = -EINVAL;
                        break;
                }
                dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
                break;

        case DMX_GET_CAPS:
                if (!dmxdev->demux->get_caps) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
                break;

        case DMX_SET_SOURCE:
                if (!dmxdev->demux->set_source) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->set_source(dmxdev->demux, parg);
                break;

        case DMX_GET_STC:
                if (!dmxdev->demux->get_stc) {
                        ret = -EINVAL;
                        break;
                }
                ret = dmxdev->demux->get_stc(dmxdev->demux,
                                             ((struct dmx_stc *)parg)->num,
                                             &((struct dmx_stc *)parg)->stc,
                                             &((struct dmx_stc *)parg)->base);
                break;

        case DMX_ADD_PID:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        case DMX_REMOVE_PID:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
                mutex_unlock(&dmxdevfilter->mutex);
                break;

        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static int dvb_demux_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
}

static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        unsigned int mask = 0;

        if (!dmxdevfilter)
                return -EINVAL;

        poll_wait(file, &dmxdevfilter->buffer.queue, wait);

        if (dmxdevfilter->state != DMXDEV_STATE_GO &&
            dmxdevfilter->state != DMXDEV_STATE_DONE &&
            dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
                return 0;

        if (dmxdevfilter->buffer.error)
                mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);

        if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
                mask |= (POLLIN | POLLRDNORM | POLLPRI);

        return mask;
}

static int dvb_demux_release(struct inode *inode, struct file *file)
{
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        int ret;

        ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);

        mutex_lock(&dmxdev->mutex);
        dmxdev->dvbdev->users--;
        if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
                fops_put(file->f_op);
                file->f_op = NULL;
                mutex_unlock(&dmxdev->mutex);
                wake_up(&dmxdev->dvbdev->wait_queue);
        } else
                mutex_unlock(&dmxdev->mutex);

        return ret;
}

static const struct file_operations dvb_demux_fops = {
        .owner = THIS_MODULE,
        .read = dvb_demux_read,
        .ioctl = dvb_demux_ioctl,
        .open = dvb_demux_open,
        .release = dvb_demux_release,
        .poll = dvb_demux_poll,
};

static struct dvb_device dvbdev_demux = {
        .priv = NULL,
        .users = 1,
        .writers = 1,
        .fops = &dvb_demux_fops
};

static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
                            unsigned int cmd, void *parg)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        unsigned long arg = (unsigned long)parg;
        int ret;

        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;

        switch (cmd) {
        case DMX_SET_BUFFER_SIZE:
                ret = dvb_dvr_set_buffer_size(dmxdev, arg);
                break;

        default:
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
        return ret;
}

static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
}

static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
{
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        unsigned int mask = 0;

        dprintk("function : %s\n", __func__);

        poll_wait(file, &dmxdev->dvr_buffer.queue, wait);

        if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
                if (dmxdev->dvr_buffer.error)
                        mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);

                if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
                        mask |= (POLLIN | POLLRDNORM | POLLPRI);
        } else
                mask |= (POLLOUT | POLLWRNORM | POLLPRI);

        return mask;
}

static struct file_operations dvb_dvr_fops = {
        .owner = THIS_MODULE,
        .read = dvb_dvr_read,
        .write = dvb_dvr_write,
        .ioctl = dvb_dvr_ioctl,
        .open = dvb_dvr_open,
        .release = dvb_dvr_release,
        .poll = dvb_dvr_poll,
};

static struct dvb_device dvbdev_dvr = {
        .priv = NULL,
        .readers = 1,
        .users = 1,
        .fops = &dvb_dvr_fops
};

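/* Allocate the filter table, register the demux and DVR devices on the
 * adapter and set up the shared DVR ring buffer.  Exported for use by demux
 * driver code. */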
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
        int i;

        if (dmxdev->demux->open(dmxdev->demux) < 0)
                return -EUSERS;

        dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
        if (!dmxdev->filter)
                return -ENOMEM;

        mutex_init(&dmxdev->mutex);
        spin_lock_init(&dmxdev->lock);
        for (i = 0; i < dmxdev->filternum; i++) {
                dmxdev->filter[i].dev = dmxdev;
                dmxdev->filter[i].buffer.data = NULL;
                dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
                                            DMXDEV_STATE_FREE);
        }

        dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
                            DVB_DEVICE_DEMUX);
        dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
                            dmxdev, DVB_DEVICE_DVR);

        dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);

        return 0;
}

EXPORT_SYMBOL(dvb_dmxdev_init);

void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
        dmxdev->exit = 1;
        if (dmxdev->dvbdev->users > 1) {
                wait_event(dmxdev->dvbdev->wait_queue,
                           dmxdev->dvbdev->users == 1);
        }
        if (dmxdev->dvr_dvbdev->users > 1) {
                wait_event(dmxdev->dvr_dvbdev->wait_queue,
                           dmxdev->dvr_dvbdev->users == 1);
        }

        dvb_unregister_device(dmxdev->dvbdev);
        dvb_unregister_device(dmxdev->dvr_dvbdev);

        vfree(dmxdev->filter);
        dmxdev->filter = NULL;
        dmxdev->demux->close(dmxdev->demux);
}

EXPORT_SYMBOL(dvb_dmxdev_release);