flexcop-pci.c

/*
 * This file is part of the Linux driver for digital TV devices equipped
 * with the B2C2 FlexcopII(b)/III.
 *
 * flexcop-pci.c - covers the PCI part, including DMA transfers.
 *
 * See flexcop.c for copyright information.
 */
#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
        "enable hardware pid filtering: supported values: 0 (fullts), 1");

static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for the IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level, args...) \
        do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level, args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...)  dprintk(0x02, args)
#define deb_ts(args...)   dprintk(0x04, args)
#define deb_irq(args...)  dprintk(0x08, args)
#define deb_chk(args...)  dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
        "set debug level (1=info, 2=regs, 4=TS, 8=irqdma, 16=check (|-able))."
        DEBSTATUS);
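
/*
 * The debug bits above can be OR'd together; as a simple illustration,
 * loading the module with debug=5 (0x01 | 0x04) enables both the info and
 * the TS messages:
 *
 *   modprobe b2c2-flexcop-pci debug=5
 *
 * (The module name used in the example is assumed from the driver name
 * below; only the bitmask arithmetic is taken from this file.)
 */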

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"

struct flexcop_pci {
        struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
        int init_state;

        void __iomem *io_mem;
        u32 irq;

        /* buffer size (at least for DMA1 it needs to be divisible by 188,
         * because the logic below relies on whole TS packets) */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
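
/*
 * A quick sanity check of the defaults (plain arithmetic, for illustration):
 * FC_DEFAULT_DMA1_BUFSIZE = 1280 * 188 = 240640 bytes, i.e. 1280 whole
 * 188-byte TS packets, and FC_DEFAULT_DMA2_BUFSIZE = 10 * 188 = 1880 bytes.
 * Both therefore satisfy the "divisible by 188" requirement stated above.
 */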
        struct flexcop_dma dma[2];

        int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
        u32 last_dma1_cur_pos; /* position of the pointer the last time the
                                * timer/packet IRQ occurred */
        int count;
        int count_prev;

        int stream_problem;

        spinlock_t irq_lock;
        unsigned long last_irq;

        struct delayed_work irq_check_work;

        struct flexcop_device *fc_dev;
};
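
/*
 * How the fields above are used (summarised from the ISR below): DMA1 is
 * double-buffered between addr0 and addr1. In full-TS mode the hardware
 * raises a page-change IRQ whenever it switches pages, and active_dma1_addr
 * tracks which page has just been completed. In PID-filtering mode the timer
 * IRQ is used instead, and last_dma1_cur_pos remembers how far the hardware
 * write pointer had advanced at the previous interrupt.
 */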

static int lastwreg, lastwval, lastrreg, lastrval;

static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
                flexcop_ibi_register r)
{
        struct flexcop_pci *fc_pci = fc->bus_specific;
        flexcop_ibi_value v;
        v.raw = readl(fc_pci->io_mem + r);

        if (lastrreg != r || lastrval != v.raw) {
                lastrreg = r; lastrval = v.raw;
                deb_reg("new rd: %3x: %08x\n", r, v.raw);
        }

        return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
                flexcop_ibi_register r, flexcop_ibi_value v)
{
        struct flexcop_pci *fc_pci = fc->bus_specific;

        if (lastwreg != r || lastwval != v.raw) {
                lastwreg = r; lastwval = v.raw;
                deb_reg("new wr: %3x: %08x\n", r, v.raw);
        }

        writel(v.raw, fc_pci->io_mem + r);
        return 0;
}

static void flexcop_pci_irq_check_work(struct work_struct *work)
{
        struct flexcop_pci *fc_pci =
                container_of(work, struct flexcop_pci, irq_check_work.work);
        struct flexcop_device *fc = fc_pci->fc_dev;

        if (fc->feedcount) {
                if (fc_pci->count == fc_pci->count_prev) {
                        deb_chk("no IRQ since the last check\n");
                        if (fc_pci->stream_problem++ == 3) {
                                struct dvb_demux_feed *feed;

                                deb_info("flexcop-pci: stream problem, resetting pid filter\n");
                                spin_lock_irq(&fc->demux.lock);
                                list_for_each_entry(feed, &fc->demux.feed_list,
                                                list_head) {
                                        flexcop_pid_feed_control(fc, feed, 0);
                                }
                                list_for_each_entry(feed, &fc->demux.feed_list,
                                                list_head) {
                                        flexcop_pid_feed_control(fc, feed, 1);
                                }
                                spin_unlock_irq(&fc->demux.lock);
                                fc_pci->stream_problem = 0;
                        }
                } else {
                        fc_pci->stream_problem = 0;
                        fc_pci->count_prev = fc_pci->count;
                }
        }

        schedule_delayed_work(&fc_pci->irq_check_work,
                        msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}
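
/*
 * The watchdog above reschedules itself, so once armed in _probe it keeps
 * running until cancelled in _remove. The interval is clamped to a minimum of
 * 100 ms, so an unreasonably small irq_chk_intv module parameter cannot flood
 * the workqueue. (This is a reading of the code above, not extra behaviour.)
 */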

/* When PID filtering is turned on, we use the timer IRQ, because even small
 * amounts of data need to be passed to user space promptly. When PID
 * filtering is turned off, we use the page-change IRQ. */
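/* In the ISR below this maps onto the two branches: DMA1_IRQ_Status handles
 * the page-change case (full TS - the page that was just filled is passed to
 * the demux in one go), while DMA1_Timer_Status handles the PID-filtered case
 * by forwarding whatever has arrived since the previous timer tick. */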
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
        struct flexcop_pci *fc_pci = dev_id;
        struct flexcop_device *fc = fc_pci->fc_dev;
        unsigned long flags;
        flexcop_ibi_value v;
        irqreturn_t ret = IRQ_HANDLED;

        spin_lock_irqsave(&fc_pci->irq_lock, flags);
        v = fc->read_ibi_reg(fc, irq_20c);

        /* errors */
        if (v.irq_20c.Data_receiver_error)
                deb_chk("data receiver error\n");
        if (v.irq_20c.Continuity_error_flag)
                deb_chk("Continuity error flag is set\n");
        if (v.irq_20c.LLC_SNAP_FLAG_set)
                deb_chk("LLC_SNAP_FLAG_set is set\n");
        if (v.irq_20c.Transport_Error)
                deb_chk("Transport error\n");

        if ((fc_pci->count % 1000) == 0)
                deb_chk("%d valid IRQs took place so far\n", fc_pci->count);

        if (v.irq_20c.DMA1_IRQ_Status == 1) {
                if (fc_pci->active_dma1_addr == 0)
                        flexcop_pass_dmx_packets(fc_pci->fc_dev,
                                        fc_pci->dma[0].cpu_addr0,
                                        fc_pci->dma[0].size / 188);
                else
                        flexcop_pass_dmx_packets(fc_pci->fc_dev,
                                        fc_pci->dma[0].cpu_addr1,
                                        fc_pci->dma[0].size / 188);

                deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
                fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
        } else if (v.irq_20c.DMA1_Timer_Status == 1) {
                /* for the timer IRQ we can only use buffer dmx feeding,
                 * because we don't have complete TS packets when reading from
                 * the DMA memory */

                /* the register holds the address right-shifted by two,
                 * i.e. in 32-bit-word units, hence the shift back */
                dma_addr_t cur_addr =
                        fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
                u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

                deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
                                jiffies_to_usecs(jiffies - fc_pci->last_irq),
                                v.raw, (unsigned long long)cur_addr, cur_pos,
                                fc_pci->last_dma1_cur_pos);
                fc_pci->last_irq = jiffies;

                /* the buffer end was reached and the hardware restarted from
                 * the beginning; pass the data from last_cur_pos up to the
                 * buffer end to the demux */
                if (cur_pos < fc_pci->last_dma1_cur_pos) {
                        deb_irq(" end was reached: passing %d bytes ",
                                        (fc_pci->dma[0].size * 2) - fc_pci->last_dma1_cur_pos);
                        flexcop_pass_dmx_data(fc_pci->fc_dev,
                                        fc_pci->dma[0].cpu_addr0 + fc_pci->last_dma1_cur_pos,
                                        (fc_pci->dma[0].size * 2) - fc_pci->last_dma1_cur_pos);
                        fc_pci->last_dma1_cur_pos = 0;
                }

                if (cur_pos > fc_pci->last_dma1_cur_pos) {
                        deb_irq(" passing %d bytes ",
                                        cur_pos - fc_pci->last_dma1_cur_pos);
                        flexcop_pass_dmx_data(fc_pci->fc_dev,
                                        fc_pci->dma[0].cpu_addr0 + fc_pci->last_dma1_cur_pos,
                                        cur_pos - fc_pci->last_dma1_cur_pos);
                }
                deb_irq("\n");

                fc_pci->last_dma1_cur_pos = cur_pos;
                fc_pci->count++;
        } else {
                deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
                                v.raw);
                ret = IRQ_NONE;
        }

        spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
        return ret;
}

static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
        struct flexcop_pci *fc_pci = fc->bus_specific;

        if (onoff) {
                flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
                flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
                flexcop_dma_config_timer(fc, FC_DMA_1, 0);

                flexcop_dma_xfer_control(fc, FC_DMA_1,
                                FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
                deb_irq("DMA xfer enabled\n");

                fc_pci->last_dma1_cur_pos = 0;
                flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
                deb_irq("IRQ enabled\n");
                fc_pci->count_prev = fc_pci->count;

                /* fc_pci->active_dma1_addr = 0; */
                /* flexcop_dma_control_size_irq(fc, FC_DMA_1, 1); */
        } else {
                flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
                deb_irq("IRQ disabled\n");

                /* flexcop_dma_control_size_irq(fc, FC_DMA_1, 0); */

                flexcop_dma_xfer_control(fc, FC_DMA_1,
                                FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
                deb_irq("DMA xfer disabled\n");
        }

        return 0;
}
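
/*
 * Note: fc->stream_control is a callback for the bus-independent flexcop core
 * (see flexcop.c); presumably it is invoked when the overall feed count goes
 * from zero to non-zero and back, so the DMA engine and timer IRQ above are
 * only active while at least one demux feed is running. This is an assumption
 * based on how the callback is wired up in _probe below, not something this
 * file enforces.
 */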

static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
        int ret;

        if ((ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
                        FC_DEFAULT_DMA1_BUFSIZE)) != 0)
                return ret;

        if ((ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
                        FC_DEFAULT_DMA2_BUFSIZE)) != 0) {
                flexcop_dma_free(&fc_pci->dma[0]);
                return ret;
        }

        flexcop_sram_set_dest(fc_pci->fc_dev,
                        FC_SRAM_DEST_MEDIA | FC_SRAM_DEST_NET,
                        FC_SRAM_DEST_TARGET_DMA1);
        flexcop_sram_set_dest(fc_pci->fc_dev,
                        FC_SRAM_DEST_CAO | FC_SRAM_DEST_CAI,
                        FC_SRAM_DEST_TARGET_DMA2);

        fc_pci->init_state |= FC_PCI_DMA_INIT;
        return ret;
}

static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
{
        if (fc_pci->init_state & FC_PCI_DMA_INIT) {
                flexcop_dma_free(&fc_pci->dma[0]);
                flexcop_dma_free(&fc_pci->dma[1]);
        }
        fc_pci->init_state &= ~FC_PCI_DMA_INIT;
}

static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
        int ret;
        u8 card_rev;

        pci_read_config_byte(fc_pci->pdev, PCI_CLASS_REVISION, &card_rev);
        info("card revision %x", card_rev);

        if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
                return ret;

        pci_set_master(fc_pci->pdev);

        /* enable interrupts */
        /* pci_write_config_dword(pdev, 0x6c, 0x8000); */

        if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
                goto err_pci_disable_device;

        fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
        if (!fc_pci->io_mem) {
                err("cannot map io memory\n");
                ret = -EIO;
                goto err_pci_release_regions;
        }

        pci_set_drvdata(fc_pci->pdev, fc_pci);
        spin_lock_init(&fc_pci->irq_lock);

        if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
                        IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
                goto err_pci_iounmap;

        fc_pci->init_state |= FC_PCI_INIT;
        return ret;

err_pci_iounmap:
        pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
        pci_set_drvdata(fc_pci->pdev, NULL);
err_pci_release_regions:
        pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
        pci_disable_device(fc_pci->pdev);
        return ret;
}

static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
{
        if (fc_pci->init_state & FC_PCI_INIT) {
                free_irq(fc_pci->pdev->irq, fc_pci);
                pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
                pci_set_drvdata(fc_pci->pdev, NULL);
                pci_release_regions(fc_pci->pdev);
                pci_disable_device(fc_pci->pdev);
        }
        fc_pci->init_state &= ~FC_PCI_INIT;
}

static int flexcop_pci_probe(struct pci_dev *pdev,
                const struct pci_device_id *ent)
{
        struct flexcop_device *fc;
        struct flexcop_pci *fc_pci;
        int ret = -ENOMEM;

        if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
                err("out of memory\n");
                return -ENOMEM;
        }

        /* general flexcop init */
        fc_pci = fc->bus_specific;
        fc_pci->fc_dev = fc;

        fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
        fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
        fc->i2c_request = flexcop_i2c_request;
        fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
        fc->stream_control = flexcop_pci_stream_control;

        if (enable_pid_filtering)
                info("will use the HW PID filter.");
        else
                info("will pass the complete TS to the demuxer.");

        fc->pid_filtering = enable_pid_filtering;
        fc->bus_type = FC_PCI;
        fc->dev = &pdev->dev;
        fc->owner = THIS_MODULE;

        /* bus specific part */
        fc_pci->pdev = pdev;
        if ((ret = flexcop_pci_init(fc_pci)) != 0)
                goto err_kfree;

        /* init flexcop */
        if ((ret = flexcop_device_initialize(fc)) != 0)
                goto err_pci_exit;

        /* init dma */
        if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
                goto err_fc_exit;

        INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

        if (irq_chk_intv > 0)
                schedule_delayed_work(&fc_pci->irq_check_work,
                                msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));

        return ret;

err_fc_exit:
        flexcop_device_exit(fc);
err_pci_exit:
        flexcop_pci_exit(fc_pci);
err_kfree:
        flexcop_device_kfree(fc);
        return ret;
}

/* In theory, every _exit function above is called from exactly two places:
 * here, and from the bail-out path of the _probe function.
 */
static void flexcop_pci_remove(struct pci_dev *pdev)
{
        struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);

        if (irq_chk_intv > 0)
                cancel_delayed_work(&fc_pci->irq_check_work);

        flexcop_pci_dma_exit(fc_pci);
        flexcop_device_exit(fc_pci->fc_dev);
        flexcop_pci_exit(fc_pci);
        flexcop_device_kfree(fc_pci->fc_dev);
}

static struct pci_device_id flexcop_pci_tbl[] = {
        { PCI_DEVICE(0x13d0, 0x2103) },
        /* { PCI_DEVICE(0x13d0, 0x2200) }, ? */
        { },
};
MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
        .name = "b2c2_flexcop_pci",
        .id_table = flexcop_pci_tbl,
        .probe = flexcop_pci_probe,
        .remove = flexcop_pci_remove,
};

static int __init flexcop_pci_module_init(void)
{
        return pci_register_driver(&flexcop_pci_driver);
}

static void __exit flexcop_pci_module_exit(void)
{
        pci_unregister_driver(&flexcop_pci_driver);
}

module_init(flexcop_pci_module_init);
module_exit(flexcop_pci_module_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");