netjet.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156
  1. /*
  2. * NETJet mISDN driver
  3. *
  4. * Author Karsten Keil <keil@isdn4linux.de>
  5. *
  6. * Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20. *
  21. */
  22. #include <linux/module.h>
  23. #include <linux/pci.h>
  24. #include <linux/delay.h>
  25. #include <linux/mISDNhw.h>
  26. #include "ipac.h"
  27. #include "iohelper.h"
  28. #include "netjet.h"
  29. #include <linux/isdn/hdlc.h>
  30. #define NETJET_REV "2.0"
/* Supported NETJet board variants. */
enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,	/* Formula-N enter:now board (TJ320 based) */
};
/* Ring-buffer bookkeeping for one Tiger DMA direction (send or receive). */
struct tiger_dma {
	size_t size;	/* ring size in u32 words */
	u32 *start;	/* CPU (virtual) address of the ring */
	int idx;	/* current word index, derived from the HW pointer */
	u32 dmastart;	/* bus address of the first ring word */
	u32 dmairq;	/* bus address that raises the half-way IRQ */
	u32 dmaend;	/* bus address of the last ring word */
	u32 dmacur;	/* most recent DMA address read back from the chip */
};
  45. struct tiger_hw;
/* Per-B-channel state. */
struct tiger_ch {
	struct bchannel bch;		/* generic mISDN B-channel */
	struct tiger_hw *nj;		/* back-pointer to the owning card */
	int idx;			/* current TX word index in the send ring */
	int free;			/* free words available in the send ring */
	int lastrx;			/* last RX index seen; -1 after (re)start */
	u16 rxstate;			/* RX_* bits */
	u16 txstate;			/* TX_* bits */
	struct isdnhdlc_vars hsend;	/* HDLC encoder state */
	struct isdnhdlc_vars hrecv;	/* HDLC decoder state */
	u8 *hsbuf;			/* HDLC send staging buffer */
	u8 *hrbuf;			/* HDLC receive staging buffer */
};
/* txstate bits */
#define TX_INIT		0x0001	/* channel just set up, not yet synced to HW */
#define TX_IDLE		0x0002	/* nothing to send, ring filled with idle */
#define TX_RUN		0x0004	/* synced to the HW pointer, transmitting */
#define TX_UNDERRUN	0x0100	/* HW consumed data faster than we produced */
/* rxstate bits */
#define RX_OVERRUN	0x0100	/* same ring index seen twice - data lost */
#define LOG_SIZE	64	/* size of the per-card hexdump prefix buffer */
/* Per-card state. */
struct tiger_hw {
	struct list_head list;		/* entry in the global Cards list */
	struct pci_dev *pdev;
	char name[MISDN_MAX_IDLEN];
	enum nj_types typ;		/* board variant */
	int irq;			/* assigned IRQ line, -1 if none */
	u32 irqcnt;			/* interrupts handled (statistics) */
	u32 base;			/* PCI I/O port base */
	size_t base_s;			/* length of the I/O region */
	dma_addr_t dma;			/* bus address of the DMA area */
	void *dma_p;			/* CPU address of the DMA area */
	spinlock_t lock; /* lock HW */
	struct isac_hw isac;		/* D-channel (ISAC) controller */
	struct tiger_dma send;		/* TX ring (the HW "read" DMA channel) */
	struct tiger_dma recv;		/* RX ring (the HW "write" DMA channel) */
	struct tiger_ch bc[2];		/* the two B-channels */
	u8 ctrlreg;			/* shadow of NJ_CTRL */
	u8 dmactrl;			/* shadow of NJ_DMACTRL */
	u8 auxd;			/* shadow of NJ_AUXDATA */
	u8 last_is0;			/* last seen IRQSTAT0 DMA state bits */
	u8 irqmask0;			/* shadow of NJ_IRQMASK0 */
	char log[LOG_SIZE];		/* scratch buffer for hexdump prefixes */
};
  88. static LIST_HEAD(Cards);
  89. static DEFINE_RWLOCK(card_lock); /* protect Cards */
  90. static u32 debug;
  91. static int nj_cnt;
  92. static void
  93. _set_debug(struct tiger_hw *card)
  94. {
  95. card->isac.dch.debug = debug;
  96. card->bc[0].bch.debug = debug;
  97. card->bc[1].bch.debug = debug;
  98. }
  99. static int
  100. set_debug(const char *val, struct kernel_param *kp)
  101. {
  102. int ret;
  103. struct tiger_hw *card;
  104. ret = param_set_uint(val, kp);
  105. if (!ret) {
  106. read_lock(&card_lock);
  107. list_for_each_entry(card, &Cards, list)
  108. _set_debug(card);
  109. read_unlock(&card_lock);
  110. }
  111. return ret;
  112. }
  113. MODULE_AUTHOR("Karsten Keil");
  114. MODULE_LICENSE("GPL v2");
  115. MODULE_VERSION(NETJET_REV);
  116. module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
  117. MODULE_PARM_DESC(debug, "Netjet debug mask");
/* Mask every interrupt source on the Tiger chip (both IRQ mask registers). */
static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}
/*
 * Read one ISAC register.
 * The ISAC is reached through a 16-register window: the two low AUXDATA
 * bits select the register page (offset bits 5:4) and the low nibble of
 * the offset, shifted by 2 for the 32-bit port spacing, selects the
 * register inside the window. Must be called with the card lock held so
 * the page select and the access stay paired.
 */
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;
	/* select the register page via the AUX pins */
	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}
/*
 * Write one ISAC register; same page-select scheme as ReadISAC_nj.
 */
static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;
	/* select the register page via the AUX pins */
	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}
/*
 * Read 'size' bytes from the ISAC FIFO.
 * Only the page-select bits are cleared (page 0); the offset parameter
 * is unused because the FIFO sits at window offset 0.
 */
static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;
	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}
/*
 * Write 'size' bytes to the ISAC FIFO; mirror of ReadFiFoISAC_nj
 * (page 0 selected, offset parameter unused).
 */
static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;
	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}
  160. static void
  161. fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
  162. {
  163. struct tiger_hw *card = bc->bch.hw;
  164. u32 mask = 0xff, val;
  165. pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
  166. bc->bch.nr, fill, cnt, idx, card->send.idx);
  167. if (bc->bch.nr & 2) {
  168. fill <<= 8;
  169. mask <<= 8;
  170. }
  171. mask ^= 0xffffffff;
  172. while (cnt--) {
  173. val = card->send.start[idx];
  174. val &= mask;
  175. val |= fill;
  176. card->send.start[idx++] = val;
  177. if (idx >= card->send.size)
  178. idx = 0;
  179. }
  180. }
/*
 * Switch a B-channel to the given mISDN protocol.
 * ISDN_P_NONE deactivates the channel (and stops DMA + IRQs once both
 * channels are down); ISDN_P_B_RAW / ISDN_P_B_HDLC activate transparent
 * or HDLC mode and start DMA if it was idle. Afterwards the software
 * ring indices are re-synced from the hardware DMA pointers.
 * Returns 0 or -ENOPROTOOPT for an unsupported protocol.
 * Caller must hold card->lock.
 */
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;
	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		/* park this channel's byte lane at idle (0xff) */
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop dma and interrupts if both channels NULL */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size/2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			/* first active channel: start DMA and unmask DMA IRQs */
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size/2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		/* reset software HDLC encoder/decoder state */
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	/* re-sync software indices with the current HW DMA pointers */
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		card->name, __func__,
		inb(card->base + NJ_DMACTRL),
		inb(card->base + NJ_IRQMASK0),
		inb(card->base + NJ_IRQSTAT0),
		card->send.idx,
		card->recv.idx);
	return 0;
}
/*
 * Hardware reset: pulse the reset bit in NJ_CTRL, release it with the
 * board-specific control value, then program the AUX pin directions and
 * route only the ISAC IRQ through IRQMASK1. The mdelay() calls give the
 * chip its settle time; the statement order is part of the sequence.
 */
static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);
	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40; /* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00; /* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);
	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}
/*
 * Allocate the shared DMA area and per-channel HDLC staging buffers,
 * then program the send (HW "read") and receive (HW "write") ring
 * addresses into the chip. The DMA area is split in half: first half TX
 * ring, second half RX ring; the *_IRQ address marks the half-way point
 * that raises the half-transfer interrupt.
 * Returns 0 or -ENOMEM. On failure the partially allocated resources
 * are left for nj_release() to free (it kfree()s the buffers and frees
 * the DMA area).
 */
static int
inittiger(struct tiger_hw *card)
{
	int i;
	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
		&card->dma);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	/* the chip only takes 32-bit bus addresses */
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	/* fill with idle pattern */
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);
	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			" size %zu u32\n", card->name,
			card->send.dmastart, card->send.dmairq,
			card->send.dmaend, card->send.start, card->send.size);
	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);
	/* second half of the DMA area is the receive ring */
	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			" size %zu u32\n", card->name,
			card->recv.dmastart, card->recv.dmairq,
			card->recv.dmaend, card->recv.start, card->recv.size);
	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}
/*
 * Pull 'cnt' words of received data for one B-channel out of the RX
 * ring starting at word index 'idx'. Each ring word carries B1 in the
 * low byte and B2 in the next byte. Transparent data goes straight into
 * the rx_skb; HDLC data is staged in hrbuf and decoded frame by frame
 * (the next_frame loop delivers every complete frame found in this
 * chunk). Runs in IRQ context under card->lock.
 */
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;
	/* seeing the same start index twice means we lost a half-buffer */
	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (!bc->bch.rx_skb) {
		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
		if (!bc->bch.rx_skb) {
			pr_info("%s: B%1d receive out of memory\n",
				card->name, bc->bch.nr);
			return;
		}
	}
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
			pr_debug("%s: B%1d overrun %d\n", card->name,
				bc->bch.nr, bc->bch.rx_skb->len + cnt);
			skb_trim(bc->bch.rx_skb, 0);
			return;
		}
		p = skb_put(bc->bch.rx_skb, cnt);
	} else
		p = bc->hrbuf;
	/* extract this channel's byte lane from the ring words */
	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}
	pn = bc->hrbuf;
next_frame:
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* i returns the number of input bytes consumed */
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
			bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
		else if (stat == -HDLC_CRC_ERROR)
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_FRAMING_ERROR)
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_LENGTH_ERROR)
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
	} else
		stat = cnt;
	if (stat > 0) {
		if (debug & DEBUG_HW_BFIFO) {
			snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
				bc->bch.nr, card->name, stat);
			print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
				p, stat);
		}
		/* hand the filled rx_skb up the stack */
		recv_Bchannel(&bc->bch, 0);
	}
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* advance past the consumed input, get a fresh skb, repeat */
		pn += i;
		cnt -= i;
		if (!bc->bch.rx_skb) {
			bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
				GFP_ATOMIC);
			if (!bc->bch.rx_skb) {
				pr_info("%s: B%1d receive out of memory\n",
					card->name, bc->bch.nr);
				return;
			}
		}
		if (cnt > 0)
			goto next_frame;
	}
}
/*
 * Half-transfer RX handler: decide which half of the receive ring the
 * hardware just finished and drain it for every active B-channel.
 * Note receive is via the hardware WRITE DMA channel.
 * NOTE(review): the idx passed to read_dma appears to be the last word
 * of the completed half (cnt-1 or size-1); read_dma wraps from there —
 * confirm against the Tiger DMA pointer semantics.
 */
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;
	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);
	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;
	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
/*
 * Re-read the hardware TX DMA pointer and re-position the software
 * index to the start of the next fully free half-ring, so a whole
 * half-buffer of lead time exists before the HW catches up.
 * NOTE(review): base|reg relies on the I/O base being suitably aligned
 * (| behaves like +); also bc->idx is derived from recv.size in this TX
 * path — harmless only while NJ_DMA_TXSIZE == NJ_DMA_RXSIZE; confirm.
 */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simple sync to the next complete free area
	 * this hast the advantage that we have always maximum time to
	 * handle TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		__func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}
  452. static int bc_next_frame(struct tiger_ch *);
/*
 * No data pending in HDLC mode: keep the line busy by encoding idle
 * flag bytes into the free part of the send ring so the receiver sees
 * continuous interframe flags instead of garbage.
 */
static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;
	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		__func__, bc->bch.nr, bc->free, bc->txstate,
		bc->idx, card->send.idx);
	/* after idle/init/underrun, re-sync to the HW DMA pointer first */
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	/* NULL source makes the encoder emit only flag bytes */
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
		bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	/* keep the other channel's byte lane untouched */
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}
/*
 * Push as much of the current tx_skb as fits into the free part of the
 * send ring. In HDLC mode the payload is first run through the software
 * HDLC encoder into hsbuf; in transparent mode it is copied directly.
 * Only this channel's byte lane of each ring word is modified. If ring
 * space remains afterwards, chain to the next frame via bc_next_frame().
 * Caller must hold card->lock.
 */
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;
	if (bc->free == 0)
		return;
	count = bc->bch.tx_skb->len - bc->bch.tx_idx;
	if (count <= 0)
		return;
	pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name,
		__func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx,
		bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx);
	/* after idle/init/underrun, re-sync to the HW DMA pointer first */
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* i = payload bytes consumed, count = encoded bytes produced */
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
			bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		bc->bch.tx_idx += count;
		bc->free -= count;
	}
	/* keep the other channel's byte lane untouched */
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}
  537. static int
  538. bc_next_frame(struct tiger_ch *bc)
  539. {
  540. if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
  541. fill_dma(bc);
  542. else {
  543. if (bc->bch.tx_skb) {
  544. /* send confirm, on trans, free on hdlc. */
  545. if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
  546. confirm_Bsend(&bc->bch);
  547. dev_kfree_skb(bc->bch.tx_skb);
  548. }
  549. if (get_next_bframe(&bc->bch))
  550. fill_dma(bc);
  551. else
  552. return 0;
  553. }
  554. return 1;
  555. }
/*
 * Half-transfer TX handler for one B-channel: the hardware has consumed
 * another half ring, so account the freed space (detecting underrun if
 * the whole ring is now free) and refill. With no pending data, HDLC
 * channels get idle flags and transparent channels get 0xff idle fill.
 */
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;
	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		/* whole ring free: HW ran dry unless we were idle/initing */
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			/* pad the remaining ring with idle pattern */
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}
/*
 * TX half-transfer dispatcher: update the remembered read-DMA state
 * bits (rejecting a repeated/double interrupt for the same half) and
 * service every active B-channel.
 */
static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;
	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		/* same half signalled twice - ignore the duplicate */
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}
/*
 * Shared interrupt handler. Reads both status registers, bails out with
 * IRQ_NONE when neither the ISAC nor the DMA engine signalled (shared
 * line), forwards ISAC interrupts to the generic handler, then derives
 * which DMA half-buffers completed by comparing the current DMA
 * pointers against the remembered state in last_is0.
 * NOTE(review): base|reg (bitwise OR) relies on the I/O base alignment
 * making it equivalent to base+reg - confirm.
 */
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;
	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		/* ISAC IRQ pin is active low */
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}
	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in sval to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08; /* the 2nd write area is free */
	else
		s0val = 0x04; /* the 1st write area is free */
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02; /* the 2nd read area is free */
	else
		s0val |= 0x01; /* the 1st read area is free */
	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		s1val, s0val, card->last_is0,
		card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		(card->last_is0 & NJ_IRQM0_RD_MASK))
			/* read DMA state changed: TX (send uses READ DMA) */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		(card->last_is0 & NJ_IRQM0_WR_MASK))
			/* write DMA state changed: RX (recv uses WRITE DMA) */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
/*
 * Layer2->Layer1 message handler for the B-channels.
 * PH_DATA_REQ queues (or directly DMAs) outgoing data, PH_ACTIVATE_REQ
 * switches the channel into the requested protocol mode, and
 * PH_DEACTIVATE_REQ tears it down. The skb is consumed (freed) on
 * success; on error the caller keeps ownership.
 */
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	u32 id;
	u_long flags;
	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			id = hh->id; /* skb can be freed */
			fill_dma(bc);
			ret = 0;
			spin_unlock_irqrestore(&card->lock, flags);
			/* HDLC gets its confirm now; transparent confirms
			 * from the TX path (confirm_Bsend) */
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
		} else
			spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
  708. static int
  709. channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
  710. {
  711. int ret = 0;
  712. struct tiger_hw *card = bc->bch.hw;
  713. switch (cq->op) {
  714. case MISDN_CTRL_GETOP:
  715. cq->op = 0;
  716. break;
  717. /* Nothing implemented yet */
  718. case MISDN_CTRL_FILL_EMPTY:
  719. default:
  720. pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
  721. ret = -EINVAL;
  722. break;
  723. }
  724. return ret;
  725. }
/*
 * B-channel command dispatcher (mISDN ch->ctrl callback).
 * CLOSE_CHANNEL deactivates/frees the channel and drops the module
 * reference taken at open; CONTROL_CHANNEL forwards to channel_bctrl().
 */
static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;
	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
			spin_lock_irqsave(&card->lock, flags);
			mISDN_freebchannel(bch);
			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
			mode_tiger(bc, ISDN_P_NONE);
			spin_unlock_irqrestore(&card->lock, flags);
		}
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}
  759. static int
  760. channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
  761. {
  762. int ret = 0;
  763. switch (cq->op) {
  764. case MISDN_CTRL_GETOP:
  765. cq->op = MISDN_CTRL_LOOP;
  766. break;
  767. case MISDN_CTRL_LOOP:
  768. /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
  769. if (cq->channel < 0 || cq->channel > 3) {
  770. ret = -EINVAL;
  771. break;
  772. }
  773. ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
  774. break;
  775. default:
  776. pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
  777. ret = -EINVAL;
  778. break;
  779. }
  780. return ret;
  781. }
  782. static int
  783. open_bchannel(struct tiger_hw *card, struct channel_req *rq)
  784. {
  785. struct bchannel *bch;
  786. if (rq->adr.channel > 2)
  787. return -EINVAL;
  788. if (rq->protocol == ISDN_P_NONE)
  789. return -EINVAL;
  790. bch = &card->bc[rq->adr.channel - 1].bch;
  791. if (test_and_set_bit(FLG_OPEN, &bch->Flags))
  792. return -EBUSY; /* b-channel can be only open once */
  793. test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
  794. bch->ch.protocol = rq->protocol;
  795. rq->ch = &bch->ch;
  796. return 0;
  797. }
/*
 * device control function
 * Handles OPEN_CHANNEL (D-channel via the ISAC open callback, otherwise
 * a B-channel; takes a module reference on success), CLOSE_CHANNEL
 * (drops the reference) and CONTROL_CHANNEL (channel_ctrl).
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;
	int err = 0;
	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			__builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
/*
 * Bring the card up: mask IRQs, claim the (shared) interrupt line,
 * reset the chip, initialize the ISAC, set up the DMA rings and park
 * both B-channels. On failure after request_irq the caller is expected
 * to clean up via nj_release() (which frees the IRQ and buffers).
 */
static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;
	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);
	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		/* mark as unclaimed so nj_release() skips free_irq() */
		card->irq = -1;
		return -EIO;
	}
	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}
/*
 * nj_release - tear down a card and free all of its resources.
 *
 * Safe to call on a partially set-up card: each step is guarded by the
 * state it depends on (card->base_s for the I/O region, card->irq for
 * the handler, the device class for mISDN registration, card->dma_p
 * for the DMA buffer).  Frees the card structure itself at the end.
 */
static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;
	/* base_s != 0 means nj_setup() claimed the region and the chip
	 * function pointers are valid - quiesce the hardware first. */
	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		card->isac.release(&card->isac);
		spin_unlock_irqrestore(&card->lock, flags);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	/* irq <= 0 means request_irq() never succeeded. */
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (card->isac.dch.dev.dev.class)
		mISDN_unregister_device(&card->isac.dch.dev);
	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		pci_free_consistent(card->pdev, NJ_DMA_SIZE,
				    card->dma_p, card->dma);
	/* Unlink from the global card list before freeing. */
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}
  901. static int
  902. nj_setup(struct tiger_hw *card)
  903. {
  904. card->base = pci_resource_start(card->pdev, 0);
  905. card->base_s = pci_resource_len(card->pdev, 0);
  906. if (!request_region(card->base, card->base_s, card->name)) {
  907. pr_info("%s: NETjet config port %#x-%#x already in use\n",
  908. card->name, card->base,
  909. (u32)(card->base + card->base_s - 1));
  910. card->base_s = 0;
  911. return -EIO;
  912. }
  913. ASSIGN_FUNC(nj, ISAC, card->isac);
  914. return 0;
  915. }
  916. static int __devinit
  917. setup_instance(struct tiger_hw *card)
  918. {
  919. int i, err;
  920. u_long flags;
  921. snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
  922. write_lock_irqsave(&card_lock, flags);
  923. list_add_tail(&card->list, &Cards);
  924. write_unlock_irqrestore(&card_lock, flags);
  925. _set_debug(card);
  926. card->isac.name = card->name;
  927. spin_lock_init(&card->lock);
  928. card->isac.hwlock = &card->lock;
  929. mISDNisac_init(&card->isac, card);
  930. card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
  931. (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
  932. card->isac.dch.dev.D.ctrl = nj_dctrl;
  933. for (i = 0; i < 2; i++) {
  934. card->bc[i].bch.nr = i + 1;
  935. set_channelmap(i + 1, card->isac.dch.dev.channelmap);
  936. mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
  937. card->bc[i].bch.hw = card;
  938. card->bc[i].bch.ch.send = nj_l2l1B;
  939. card->bc[i].bch.ch.ctrl = nj_bctrl;
  940. card->bc[i].bch.ch.nr = i + 1;
  941. list_add(&card->bc[i].bch.ch.list,
  942. &card->isac.dch.dev.bchannels);
  943. card->bc[i].bch.hw = card;
  944. }
  945. err = nj_setup(card);
  946. if (err)
  947. goto error;
  948. err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
  949. card->name);
  950. if (err)
  951. goto error;
  952. err = nj_init_card(card);
  953. if (!err) {
  954. nj_cnt++;
  955. pr_notice("Netjet %d cards installed\n", nj_cnt);
  956. return 0;
  957. }
  958. error:
  959. nj_release(card);
  960. return err;
  961. }
  962. static int __devinit
  963. nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  964. {
  965. int err = -ENOMEM;
  966. int cfg;
  967. struct tiger_hw *card;
  968. if (pdev->subsystem_vendor == 0x8086 &&
  969. pdev->subsystem_device == 0x0003) {
  970. pr_notice("Netjet: Digium X100P/X101P not handled\n");
  971. return -ENODEV;
  972. }
  973. if (pdev->subsystem_vendor == 0x55 &&
  974. pdev->subsystem_device == 0x02) {
  975. pr_notice("Netjet: Enter!Now not handled yet\n");
  976. return -ENODEV;
  977. }
  978. card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
  979. if (!card) {
  980. pr_info("No kmem for Netjet\n");
  981. return err;
  982. }
  983. card->pdev = pdev;
  984. err = pci_enable_device(pdev);
  985. if (err) {
  986. kfree(card);
  987. return err;
  988. }
  989. printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
  990. pci_name(pdev));
  991. pci_set_master(pdev);
  992. /* the TJ300 and TJ320 must be detected, the IRQ handling is different
  993. * unfortunately the chips use the same device ID, but the TJ320 has
  994. * the bit20 in status PCI cfg register set
  995. */
  996. pci_read_config_dword(pdev, 0x04, &cfg);
  997. if (cfg & 0x00100000)
  998. card->typ = NETJET_S_TJ320;
  999. else
  1000. card->typ = NETJET_S_TJ300;
  1001. card->base = pci_resource_start(pdev, 0);
  1002. card->irq = pdev->irq;
  1003. pci_set_drvdata(pdev, card);
  1004. err = setup_instance(card);
  1005. if (err)
  1006. pci_set_drvdata(pdev, NULL);
  1007. return err;
  1008. }
  1009. static void __devexit nj_remove(struct pci_dev *pdev)
  1010. {
  1011. struct tiger_hw *card = pci_get_drvdata(pdev);
  1012. if (card)
  1013. nj_release(card);
  1014. else
  1015. pr_info("%s drvdata already removed\n", __func__);
  1016. }
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the
 * known cards that do not work with this driver - see the probe function */
  1020. static struct pci_device_id nj_pci_ids[] __devinitdata = {
  1021. { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
  1022. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
  1023. { }
  1024. };
  1025. MODULE_DEVICE_TABLE(pci, nj_pci_ids);
/* PCI driver glue: probe/remove callbacks and the device match table. */
static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = __devexit_p(nj_remove),
	.id_table = nj_pci_ids,
};
  1032. static int __init nj_init(void)
  1033. {
  1034. int err;
  1035. pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
  1036. err = pci_register_driver(&nj_driver);
  1037. return err;
  1038. }
/*
 * nj_cleanup - module exit point.  Unregistering the driver triggers
 * nj_remove() (and thus nj_release()) for every bound card.
 */
static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}
module_init(nj_init);
module_exit(nj_cleanup);