netjet.c

/*
 * NETJet mISDN driver
 *
 * Author	Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include <linux/isdn/hdlc.h>

#define NETJET_REV	"2.0"

enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};
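/* Bookkeeping for one Tiger DMA ring: 'start' is the virtual base of the
 * u32 ring, 'dmastart'/'dmaend' the bus addresses of its first and last
 * word, 'dmairq' the half-way word programmed into the controller's IRQ
 * address register, and 'dmacur'/'idx' the hardware position last read back.
 */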
struct tiger_dma {
	size_t size;
	u32 *start;
	int idx;
	u32 dmastart;
	u32 dmairq;
	u32 dmaend;
	u32 dmacur;
};

struct tiger_hw;
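/* Per B-channel state: position and free space in the send ring plus the
 * soft HDLC codec state and scratch buffers (hsbuf/hrbuf), since HDLC
 * framing for this hardware is done in software.
 */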
struct tiger_ch {
	struct bchannel bch;
	struct tiger_hw *nj;
	int idx;
	int free;
	int lastrx;
	u16 rxstate;
	u16 txstate;
	struct isdnhdlc_vars hsend;
	struct isdnhdlc_vars hrecv;
	u8 *hsbuf;
	u8 *hrbuf;
};

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64

struct tiger_hw {
	struct list_head list;
	struct pci_dev *pdev;
	char name[MISDN_MAX_IDLEN];
	enum nj_types typ;
	int irq;
	u32 irqcnt;
	u32 base;
	size_t base_s;
	dma_addr_t dma;
	void *dma_p;
	spinlock_t lock;	/* lock HW */
	struct isac_hw isac;
	struct tiger_dma send;
	struct tiger_dma recv;
	struct tiger_ch bc[2];
	u8 ctrlreg;
	u8 dmactrl;
	u8 auxd;
	u8 last_is0;
	u8 irqmask0;
	char log[LOG_SIZE];
};

static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

static int
set_debug(const char *val, struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");

static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}
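/* The ISAC is reached indirectly through the Tiger ASIC: bits 4-5 of the
 * register offset are latched into AUXDATA, and the low nibble selects the
 * I/O address at NJ_ISAC_OFF + (offset & 0x0f) * 4.
 */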
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}
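/* Write 'fill' into 'cnt' words of the send ring starting at 'idx',
 * touching only this channel's byte lane (B1 uses the low byte, B2 the
 * next byte of every u32).
 */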
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}
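/* Switch a B-channel between off (ISDN_P_NONE), transparent and HDLC mode.
 * DMA and the Tiger interrupts are started with the first active channel
 * and stopped again only when both channels are back to ISDN_P_NONE.
 */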
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop dma and interrupts if both channels NULL */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}

static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;	/* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;	/* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}
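/* Allocate the coherent DMA block and the per-channel HDLC scratch buffers.
 * The first half of the block becomes the send (READ DMA) ring, the second
 * half the receive (WRITE DMA) ring, each with its half-way IRQ address.
 */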
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
					   &card->dma);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);
	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);
	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}
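/* Copy 'cnt' words that the WRITE DMA ring received starting at 'idx' into
 * the channel: transparent data goes straight into the rx skb, HDLC data is
 * first collected in hrbuf and decoded frame by frame via isdnhdlc_decode().
 */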
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
		bc->bch.dropcnt += cnt;
		return;
	}
	stat = bchannel_get_rxbuf(&bc->bch, cnt);
	/* only transparent mode uses the count here, HDLC overrun is detected later */
	if (stat == -ENOMEM) {
		pr_warning("%s.B%d: No memory for %d bytes\n",
			   card->name, bc->bch.nr, cnt);
		return;
	}
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
		p = skb_put(bc->bch.rx_skb, cnt);
	else
		p = bc->hrbuf;

	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		recv_Bchannel(&bc->bch, 0, false);
		return;
	}

	pn = bc->hrbuf;
	while (cnt > 0) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) { /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
			if (debug & DEBUG_HW_BFIFO) {
				snprintf(card->log, LOG_SIZE,
					 "B%1d-recv %s %d ", bc->bch.nr,
					 card->name, stat);
				print_hex_dump_bytes(card->log,
						     DUMP_PREFIX_OFFSET, p,
						     stat);
			}
			recv_Bchannel(&bc->bch, 0, false);
			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
			if (stat < 0) {
				pr_warning("%s.B%d: No memory for %d bytes\n",
					   card->name, bc->bch.nr, cnt);
				return;
			}
		} else if (stat == -HDLC_CRC_ERROR) {
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_FRAMING_ERROR) {
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_LENGTH_ERROR) {
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
		}
		pn += i;
		cnt -= i;
	}
}
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next complete free area;
	 * this has the advantage that we always have maximum time to
	 * handle the TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}

static int bc_next_frame(struct tiger_ch *);
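/* Nothing queued for transmission: keep the HDLC transmitter busy by
 * encoding idle flags into the free part of this channel's byte lane.
 */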
static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}
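/* Move payload from the current tx_skb (or the configured fill byte when
 * FLG_TX_EMPTY is set) into this channel's byte lane of the send ring,
 * HDLC-encoding it on the fly if the channel runs in HDLC mode.
 */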
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i, fillempty = 0;
	u32 m, v, n = 0;
	u8 *p;

	if (bc->free == 0)
		return;
	if (!bc->bch.tx_skb) {
		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
			return;
		fillempty = 1;
		count = card->send.size >> 1;
		p = bc->bch.fill;
	} else {
		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
		if (count <= 0)
			return;
		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
			 card->name, __func__, bc->bch.nr, count, bc->free,
			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
			 bc->idx, card->send.idx);
		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	}
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		if (!fillempty)
			bc->bch.tx_idx += count;
		bc->free -= count;
	}
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	if (fillempty) {
		n = p[0];
		if (!(bc->bch.nr & 1))
			n <<= 8;
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			v |= n;
			card->send.start[bc->idx++] = v;
		}
	} else {
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			n = p[i];
			v |= (bc->bch.nr & 1) ? n : n << 8;
			card->send.start[bc->idx++] = v;
		}
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}

static int
bc_next_frame(struct tiger_ch *bc)
{
	int ret = 1;

	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
		fill_dma(bc);
	} else {
		if (bc->bch.tx_skb)
			dev_kfree_skb(bc->bch.tx_skb);
		if (get_next_bframe(&bc->bch)) {
			fill_dma(bc);
			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
			fill_dma(bc);
		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
			ret = 0;
		} else {
			ret = 0;
		}
	}
	return ret;
}
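/* One half of the send ring has been consumed by the READ DMA: account the
 * freed space, flag a TX underrun if the whole ring drained, and refill it
 * with the next frame, HDLC idle flags or the 0xff idle pattern.
 */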
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}

static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}
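/* Shared interrupt handler: forward ISAC (D-channel) interrupts to the
 * mISDN ISAC core, acknowledge IRQSTAT0, then derive from the current DMA
 * read/write pointers which half of each ring is free and call
 * send_tiger()/recv_tiger() when that changed since the last interrupt.
 */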
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}
	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in s0val to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */
	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* got a read dma int */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* got a write dma int */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
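/* mISDN B-channel send hook: handles PH_DATA_REQ (queue the skb and, when
 * possible, feed the DMA ring directly), PH_ACTIVATE_REQ and
 * PH_DEACTIVATE_REQ.
 */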
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	unsigned long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			fill_dma(bc);
			ret = 0;
		}
		spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	return mISDN_ctrl_bchannel(&bc->bch, cq);
}

static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		spin_lock_irqsave(&card->lock, flags);
		mISDN_freebchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}

static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	case MISDN_CTRL_L1_TIMER3:
		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}
/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;
	int err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
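/* Request the (shared) IRQ, reset the board and initialize the ISAC and the
 * Tiger DMA rings; both B-channels start out disabled (ISDN_P_NONE).
 */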
static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}

static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		card->isac.release(&card->isac);
		spin_unlock_irqrestore(&card->lock, flags);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (card->isac.dch.dev.dev.class)
		mISDN_unregister_device(&card->isac.dch.dev);

	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		pci_free_consistent(card->pdev, NJ_DMA_SIZE,
				    card->dma_p, card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}
static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}

static int __devinit
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
				   NJ_DMA_RXSIZE >> 1);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
		card->bc[i].bch.hw = card;
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err) {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}
static int __devinit
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}
	card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}
	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* The TJ300 and TJ320 must be distinguished because their IRQ
	 * handling differs; unfortunately both chips use the same device ID,
	 * but the TJ320 has bit 20 set in the PCI status config word.
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (card)
		nj_release(card);
	else
		pr_info("%s drvdata already removed\n", __func__);
}

/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the known
 * other cards which do not work with this driver - see probe function */
static struct pci_device_id nj_pci_ids[] __devinitdata = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = __devexit_p(nj_remove),
	.id_table = nj_pci_ids,
};

static int __init nj_init(void)
{
	int err;

	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);
	return err;
}

static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}

module_init(nj_init);
module_exit(nj_cleanup);