flexcop-pci.c
/*
 * This file is part of the Linux driver for digital TV devices
 * equipped with the B2C2 FlexcopII(b)/III.
 *
 * flexcop-pci.c - covers the PCI part, including DMA transfers.
 *
 * see flexcop.c for copyright information.
 */
#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (full TS), 1");

static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv,
	"set the interval (in ms) for the IRQ streaming watchdog.");
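/* Example module load (sketch only; the module name b2c2-flexcop-pci is
 * assumed here and may differ depending on how the driver is built):
 *
 *   modprobe b2c2-flexcop-pci enable_pid_filtering=0 irq_chk_intv=200
 *
 * would pass the complete TS to the demuxer and run the IRQ streaming
 * watchdog every 200 ms.
 */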
#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level, args...) \
	do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level, args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...)  dprintk(0x02, args)
#define deb_ts(args...)   dprintk(0x04, args)
#define deb_irq(args...)  dprintk(0x08, args)
#define deb_chk(args...)  dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info, 2=regs, 4=TS, 8=irqdma, 16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;

	void __iomem *io_mem;
	u32 irq;

	/* buffer size (at least for DMA1 it needs to be a multiple of 188;
	 * this logic is required) */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos; /* position of the pointer the last time
				* the timer/packet IRQ occurred */
	int count;
	int count_prev;
	int stream_problem;

	spinlock_t irq_lock;
	unsigned long last_irq;

	struct delayed_work irq_check_work;

	struct flexcop_device *fc_dev;
};
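/* Last register/value pairs seen by the register accessors below; they are
 * only used to suppress duplicate deb_reg() output when the same register is
 * read or written repeatedly with the same value. */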
static int lastwreg, lastwval, lastrreg, lastrval;

static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);

	if (lastrreg != r || lastrval != v.raw) {
		lastrreg = r; lastrval = v.raw;
		deb_reg("new rd: %3x: %08x\n", r, v.raw);
	}

	return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (lastwreg != r || lastwval != v.raw) {
		lastwreg = r; lastwval = v.raw;
		deb_reg("new wr: %3x: %08x\n", r, v.raw);
	}

	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}
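/* Streaming watchdog: while feeds are active, check periodically whether the
 * IRQ counter has advanced since the last run. If it has stalled for several
 * consecutive checks, stop and restart all active feeds to recover the
 * stream. The work reschedules itself every irq_chk_intv ms (100 ms minimum). */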
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {
		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;

				spin_lock_irq(&fc->demux.lock);
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}

				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}

				spin_unlock_irq(&fc->demux.lock);
				fc_pci->stream_problem = 0;
			}
		} else {
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}
/* When PID filtering is turned on, we use the timer IRQ, because small
 * amounts of data need to be passed to the user space instantly as well.
 * When PID filtering is turned off, we use the page-change IRQ. */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid IRQs took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		/* page-change IRQ: hand the page that just filled up to the
		 * demux as complete TS packets, then toggle the active page */
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* for the timer IRQ we can only use buffer dmx feeding,
		 * because we don't have complete TS packets when reading
		 * from the DMA memory */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* buffer end was reached, restarted from the beginning:
		 * pass the data from last_cur_pos to the buffer end to the
		 * demux */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
					(fc_pci->dma[0].size * 2 - 1) -
					fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
						fc_pci->last_dma1_cur_pos,
					(fc_pci->dma[0].size * 2) -
						fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
					cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
						fc_pci->last_dma1_cur_pos,
					cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
				v.raw);
		ret = IRQ_NONE;
	}

	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}
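/* Called by the flexcop core to start or stop the transport stream: it
 * configures both DMA channels, sets up the DMA1 timer, switches the DMA
 * transfers for both DMA1 sub-buffers on or off, and gates the DMA1 timer
 * IRQ accordingly. */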
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);

		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		fc_pci->count_prev = fc_pci->count;

		/* fc_pci->active_dma1_addr = 0; */
		/* flexcop_dma_control_size_irq(fc, FC_DMA_1, 1); */
	} else {
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		/* flexcop_dma_control_size_irq(fc, FC_DMA_1, 0); */

		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}
static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
	int ret;

	if ((ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
					FC_DEFAULT_DMA1_BUFSIZE)) != 0)
		return ret;

	if ((ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
					FC_DEFAULT_DMA2_BUFSIZE)) != 0) {
		flexcop_dma_free(&fc_pci->dma[0]);
		return ret;
	}

	/* route TS data (media/net) to DMA1 and CA traffic (CAO/CAI) to DMA2 */
	flexcop_sram_set_dest(fc_pci->fc_dev,
			FC_SRAM_DEST_MEDIA | FC_SRAM_DEST_NET,
			FC_SRAM_DEST_TARGET_DMA1);
	flexcop_sram_set_dest(fc_pci->fc_dev,
			FC_SRAM_DEST_CAO | FC_SRAM_DEST_CAI,
			FC_SRAM_DEST_TARGET_DMA2);

	fc_pci->init_state |= FC_PCI_DMA_INIT;
	return ret;
}

static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_DMA_INIT) {
		flexcop_dma_free(&fc_pci->dma[0]);
		flexcop_dma_free(&fc_pci->dma[1]);
	}
	fc_pci->init_state &= ~FC_PCI_DMA_INIT;
}
static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;
	u8 card_rev;

	pci_read_config_byte(fc_pci->pdev, PCI_CLASS_REVISION, &card_rev);
	info("card revision %x", card_rev);

	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
		return ret;

	pci_set_master(fc_pci->pdev);

	/* enable interrupts */
	/* pci_write_config_dword(pdev, 0x6c, 0x8000); */

	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
		goto err_pci_disable_device;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
	if (!fc_pci->io_mem) {
		err("cannot map io memory\n");
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);
	spin_lock_init(&fc_pci->irq_lock);

	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
		goto err_pci_iounmap;

	fc_pci->init_state |= FC_PCI_INIT;
	return ret;

err_pci_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
	pci_set_drvdata(fc_pci->pdev, NULL);
err_pci_release_regions:
	pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
	pci_disable_device(fc_pci->pdev);
	return ret;
}
static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_INIT) {
		free_irq(fc_pci->pdev->irq, fc_pci);
		pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
		pci_set_drvdata(fc_pci->pdev, NULL);
		pci_release_regions(fc_pci->pdev);
		pci_disable_device(fc_pci->pdev);
	}
	fc_pci->init_state &= ~FC_PCI_INIT;
}
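/* Probe order: allocate the flexcop device, hook up the PCI-specific
 * register/stream callbacks, bring up the PCI side (BAR mapping, IRQ),
 * initialize the flexcop core, set up the DMA buffers, and finally start
 * the IRQ watchdog (only if irq_chk_intv > 0). The error labels unwind
 * these steps in reverse order. */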
static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret = -ENOMEM;

	if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
		err("out of memory\n");
		return -ENOMEM;
	}

	/* general flexcop init */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
	fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
	fc->i2c_request = flexcop_i2c_request;
	fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
	fc->stream_control = flexcop_pci_stream_control;

	if (enable_pid_filtering)
		info("will use the HW PID filter.");
	else
		info("will pass the complete TS to the demuxer.");

	fc->pid_filtering = enable_pid_filtering;
	fc->bus_type = FC_PCI;
	fc->dev = &pdev->dev;
	fc->owner = THIS_MODULE;

	/* bus specific part */
	fc_pci->pdev = pdev;
	if ((ret = flexcop_pci_init(fc_pci)) != 0)
		goto err_kfree;

	/* init flexcop */
	if ((ret = flexcop_device_initialize(fc)) != 0)
		goto err_pci_exit;

	/* init dma */
	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
		goto err_fc_exit;

	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

	if (irq_chk_intv > 0)
		schedule_delayed_work(&fc_pci->irq_check_work,
				msecs_to_jiffies(irq_chk_intv < 100
					? 100 : irq_chk_intv));

	return ret;

err_fc_exit:
	flexcop_device_exit(fc);
err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}

/* in theory, every _exit function should be called exactly twice: here in
 * flexcop_pci_remove() and in the bail-out path of the corresponding _init
 * function */
static void flexcop_pci_remove(struct pci_dev *pdev)
{
	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);

	if (irq_chk_intv > 0)
		cancel_delayed_work(&fc_pci->irq_check_work);

	flexcop_pci_dma_exit(fc_pci);
	flexcop_device_exit(fc_pci->fc_dev);
	flexcop_pci_exit(fc_pci);
	flexcop_device_kfree(fc_pci->fc_dev);
}

static struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	/* { PCI_DEVICE(0x13d0, 0x2200) }, ? */
	{ },
};

MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name     = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe    = flexcop_pci_probe,
	.remove   = flexcop_pci_remove,
};
static int __init flexcop_pci_module_init(void)
{
	return pci_register_driver(&flexcop_pci_driver);
}

static void __exit flexcop_pci_module_exit(void)
{
	pci_unregister_driver(&flexcop_pci_driver);
}

module_init(flexcop_pci_module_init);
module_exit(flexcop_pci_module_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");