hd6457x.c

/*
 * Hitachi SCA HD64570 and HD64572 common driver for Linux
 *
 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Sources of information:
 *    Hitachi HD64570 SCA User's Manual
 *    Hitachi HD64572 SCA-II User's Manual
 *
 * We use the following SCA memory map:
 *
 * Packet buffer descriptor rings - starting from winbase or win0base:
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
 *
 * Packet data buffers - starting from winbase + buff_offset:
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
 */

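/*
 * Illustrative sketch of that map (illustrative figures only, not from the
 * manuals): with, say, rx_ring_buffers = tx_ring_buffers = 8, channel #0's
 * RX descriptors occupy absolute slots 0-7, its TX descriptors slots 8-15,
 * and channel #1's RX and TX descriptors slots 16-23 and 24-31.  Descriptor
 * slot n then lives at n * sizeof(pkt_desc) from winbase/win0base, and its
 * data buffer at buff_offset + n * HDLC_MAX_MRU.  See desc_abs_number(),
 * desc_offset() and buffer_offset() below.
 */
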
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/hdlc.h>

#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
    (defined (__HD64570_H) && defined (__HD64572_H))
#error Either hd64570.h or hd64572.h must be included
#endif

#define get_msci(port)     (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port)  (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port)  (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)

#define SCA_INTR_MSCI(node)    (node ? 0x10 : 0x01)
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)

#ifdef __HD64570_H /* HD64570 */
#define sca_outa(value, reg, card)  sca_outw(value, reg, card)
#define sca_ina(reg, card)          sca_inw(reg, card)
#define writea(value, ptr)          writew(value, ptr)
#else /* HD64572 */
#define sca_outa(value, reg, card)  sca_outl(value, reg, card)
#define sca_ina(reg, card)          sca_inl(reg, card)
#define writea(value, ptr)          writel(value, ptr)
#endif

static inline struct net_device *port_to_dev(port_t *port)
{
        return port->dev;
}

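/*
 * Summarize the chip's interrupt status registers into one byte with an
 * MSCI, RX DMA and TX DMA bit per logical channel (the SCA_INTR_* masks
 * above).  The extra DSR_EOM checks below appear to be there to pick up TX
 * completions whose ISR bit has already been cleared.
 */
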
static inline int sca_intr_status(card_t *card)
{
        u8 result = 0;

#ifdef __HD64570_H /* HD64570 */
        u8 isr0 = sca_in(ISR0, card);
        u8 isr1 = sca_in(ISR1, card);

        if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
        if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
        if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
        if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
        if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
        if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);

#else /* HD64572 */
        u32 isr0 = sca_inl(ISR0, card);

        if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
        if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
        if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
        if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
        if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
        if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
#endif /* HD64570 vs HD64572 */

        if (!(result & SCA_INTR_DMAC_TX(0)))
                if (sca_in(DSR_TX(0), card) & DSR_EOM)
                        result |= SCA_INTR_DMAC_TX(0);
        if (!(result & SCA_INTR_DMAC_TX(1)))
                if (sca_in(DSR_TX(1), card) & DSR_EOM)
                        result |= SCA_INTR_DMAC_TX(1);

        return result;
}

static inline port_t* dev_to_port(struct net_device *dev)
{
        return dev_to_hdlc(dev)->priv;
}

static inline u16 next_desc(port_t *port, u16 desc, int transmit)
{
        return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
                             : port_to_card(port)->rx_ring_buffers);
}

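/*
 * Descriptor numbering: desc_abs_number() converts a per-ring index into an
 * absolute slot - channel #1's rings follow channel #0's, and each TX ring
 * follows its RX ring.  The modulo below lets callers pass unwrapped values
 * such as "txin + 1".
 */
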
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
        u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
        u16 tx_buffs = port_to_card(port)->tx_ring_buffers;

        desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
        return log_node(port) * (rx_buffs + tx_buffs) +
                transmit * rx_buffs + desc;
}

static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
        /* Descriptor offset always fits in 16 bits */
        return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}

static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
                                             int transmit)
{
#ifdef PAGE0_ALWAYS_MAPPED
        return (pkt_desc __iomem *)(win0base(port_to_card(port))
                                    + desc_offset(port, desc, transmit));
#else
        return (pkt_desc __iomem *)(winbase(port_to_card(port))
                                    + desc_offset(port, desc, transmit));
#endif
}

static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
        return port_to_card(port)->buff_offset +
                desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}

static inline void sca_set_carrier(port_t *port)
{
        if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier on\n",
                       port_to_dev(port)->name);
#endif
                netif_carrier_on(port_to_dev(port));
        } else {
#ifdef DEBUG_LINK
                printk(KERN_DEBUG "%s: sca_set_carrier off\n",
                       port_to_dev(port)->name);
#endif
                netif_carrier_off(port_to_dev(port));
        }
}

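/*
 * Per-channel ring initialization: chain each descriptor to the next one,
 * point it at its data buffer and clear its status, then reset both DMACs
 * and program their current/end descriptor addresses.  RX DMA is enabled
 * here; TX DMA is only enabled once there is a frame to send (sca_xmit()).
 */
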
static void sca_init_sync_port(port_t *port)
{
        card_t *card = port_to_card(port);
        int transmit, i;

        port->rxin = 0;
        port->txin = 0;
        port->txlast = 0;

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        openwin(card, 0);
#endif

        for (transmit = 0; transmit < 2; transmit++) {
                u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
                u16 buffs = transmit ? card->tx_ring_buffers
                        : card->rx_ring_buffers;

                for (i = 0; i < buffs; i++) {
                        pkt_desc __iomem *desc = desc_address(port, i, transmit);
                        u16 chain_off = desc_offset(port, i + 1, transmit);
                        u32 buff_off = buffer_offset(port, i, transmit);

                        writea(chain_off, &desc->cp);
                        writel(buff_off, &desc->bp);
                        writew(0, &desc->len);
                        writeb(0, &desc->stat);
                }

                /* DMA disable - to halt state */
                sca_out(0, transmit ? DSR_TX(phy_node(port)) :
                        DSR_RX(phy_node(port)), card);
                /* software ABORT - to initial state */
                sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
                        DCR_RX(phy_node(port)), card);

#ifdef __HD64570_H
                sca_out(0, dmac + CPB, card); /* pointer base */
#endif
                /* current desc addr */
                sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
                if (!transmit)
                        sca_outa(desc_offset(port, buffs - 1, transmit),
                                 dmac + EDAL, card);
                else
                        sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
                                 card);

                /* clear frame end interrupt counter */
                sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
                        DCR_RX(phy_node(port)), card);

                if (!transmit) { /* Receive */
                        /* set buffer length */
                        sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
                        /* Chain mode, Multi-frame */
                        sca_out(0x14, DMR_RX(phy_node(port)), card);
                        sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
                                card);
                        /* DMA enable */
                        sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
                } else { /* Transmit */
                        /* Chain mode, Multi-frame */
                        sca_out(0x14, DMR_TX(phy_node(port)), card);
                        /* enable underflow interrupts */
                        sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
                }
        }

        sca_set_carrier(port);
}

#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
        u16 msci = get_msci(port);
        card_t* card = port_to_card(port);
        u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

        /* Reset MSCI TX underrun and CDCD status bit */
        sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

        if (stat & ST1_UDRN) {
                /* TX Underrun error detected */
                port_to_dev(port)->stats.tx_errors++;
                port_to_dev(port)->stats.tx_fifo_errors++;
        }

        if (stat & ST1_CDCD)
                sca_set_carrier(port);
}
#endif

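/*
 * Copy one received frame from card RAM into a freshly allocated skb and
 * pass it up the stack.  On cards without a flat mapping (i.e. when
 * ALL_PAGES_ALWAYS_MAPPED is not defined) the buffer may straddle a window
 * boundary, hence the two-part memcpy_fromio() with openwin() in between.
 */
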
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
                          u16 rxin)
{
        struct net_device *dev = port_to_dev(port);
        struct sk_buff *skb;
        u16 len;
        u32 buff;
#ifndef ALL_PAGES_ALWAYS_MAPPED
        u32 maxlen;
        u8 page;
#endif

        len = readw(&desc->len);
        skb = dev_alloc_skb(len);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        buff = buffer_offset(port, rxin, 0);
#ifndef ALL_PAGES_ALWAYS_MAPPED
        page = buff / winsize(card);
        buff = buff % winsize(card);
        maxlen = winsize(card) - buff;

        openwin(card, page);

        if (len > maxlen) {
                memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
                openwin(card, page + 1);
                memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
        } else
#endif
                memcpy_fromio(skb->data, winbase(card) + buff, len);

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        /* select pkt_desc table page back */
        openwin(card, 0);
#endif
        skb_put(skb, len);
#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        dev->last_rx = jiffies;
        skb->protocol = hdlc_type_trans(skb, dev);
        netif_rx(skb);
}

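/*
 * The receive interrupt handler below walks the RX ring from port->rxin and
 * stops as soon as the DMAC's current descriptor address (CDA) points into
 * the descriptor being examined, i.e. that frame is still in flight.  Every
 * completed descriptor is counted as an error, flagged as a partial frame,
 * or handed to sca_rx(), and is then returned to the DMAC as the new
 * end/error descriptor address (EDAL).
 */
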
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
        struct net_device *dev = port_to_dev(port);
        u16 dmac = get_dmac_rx(port);
        card_t *card = port_to_card(port);
        u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_RX(phy_node(port)), card);

        if (stat & DSR_BOF)
                /* Dropped one or more frames */
                dev->stats.rx_over_errors++;

        while (1) {
                u32 desc_off = desc_offset(port, port->rxin, 0);
                pkt_desc __iomem *desc;
                u32 cda = sca_ina(dmac + CDAL, card);

                if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
                        break;  /* No frame received */

                desc = desc_address(port, port->rxin, 0);
                stat = readb(&desc->stat);
                if (!(stat & ST_RX_EOM))
                        port->rxpart = 1; /* partial frame received */
                else if ((stat & ST_ERROR_MASK) || port->rxpart) {
                        dev->stats.rx_errors++;
                        if (stat & ST_RX_OVERRUN)
                                dev->stats.rx_fifo_errors++;
                        else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
                                          ST_RX_RESBIT)) || port->rxpart)
                                dev->stats.rx_frame_errors++;
                        else if (stat & ST_RX_CRC)
                                dev->stats.rx_crc_errors++;
                        if (stat & ST_RX_EOM)
                                port->rxpart = 0; /* received last fragment */
                } else
                        sca_rx(card, port, desc, port->rxin);

                /* Set new error descriptor address */
                sca_outa(desc_off, dmac + EDAL, card);
                port->rxin = next_desc(port, port->rxin, 0);
        }

        /* make sure RX DMA is enabled */
        sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}

/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
        struct net_device *dev = port_to_dev(port);
        u16 dmac = get_dmac_tx(port);
        card_t* card = port_to_card(port);
        u8 stat;

        spin_lock(&port->lock);

        stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

        /* Reset DSR status bits */
        sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
                DSR_TX(phy_node(port)), card);

        while (1) {
                pkt_desc __iomem *desc;
                u32 desc_off = desc_offset(port, port->txlast, 1);
                u32 cda = sca_ina(dmac + CDAL, card);

                if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
                        break;  /* Transmitter is/will_be sending this frame */

                desc = desc_address(port, port->txlast, 1);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += readw(&desc->len);
                writeb(0, &desc->stat); /* Free descriptor */
                port->txlast = next_desc(port, port->txlast, 1);
        }

        netif_wake_queue(dev);
        spin_unlock(&port->lock);
}

static irqreturn_t sca_intr(int irq, void* dev_id)
{
        card_t *card = dev_id;
        int i;
        u8 stat;
        int handled = 0;
#ifndef ALL_PAGES_ALWAYS_MAPPED
        u8 page = sca_get_page(card);
#endif

        while ((stat = sca_intr_status(card)) != 0) {
                handled = 1;
                for (i = 0; i < 2; i++) {
                        port_t *port = get_port(card, i);
                        if (port) {
                                if (stat & SCA_INTR_MSCI(i))
                                        sca_msci_intr(port);

                                if (stat & SCA_INTR_DMAC_RX(i))
                                        sca_rx_intr(port);

                                if (stat & SCA_INTR_DMAC_TX(i))
                                        sca_tx_intr(port);
                        }
                }
        }

#ifndef ALL_PAGES_ALWAYS_MAPPED
        openwin(card, page); /* Restore original page */
#endif
        return IRQ_RETVAL(handled);
}

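/*
 * Baud rate generator setup.  The BRG formula is
 * bit rate = CLOCK_BASE / TMC / 2^BR; the loop below lowers BR to keep TMC
 * as large as possible for better accuracy.  Worked example (illustrative
 * only, assuming a card with CLOCK_BASE == 9830400): a requested clock_rate
 * of 9600 ends up with BR = 2 and TMC = 256 (written to the 8-bit register
 * as 0), giving exactly 9830400 / 4 / 256 = 9600 bps.
 */
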
static void sca_set_port(port_t *port)
{
        card_t* card = port_to_card(port);
        u16 msci = get_msci(port);
        u8 md2 = sca_in(msci + MD2, card);
        unsigned int tmc, br = 10, brv = 1024;

        if (port->settings.clock_rate > 0) {
                /* Try lower br for better accuracy*/
                do {
                        br--;
                        brv >>= 1; /* brv = 2^9 = 512 max in specs */

                        /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
                        tmc = CLOCK_BASE / brv / port->settings.clock_rate;
                } while (br > 1 && tmc <= 128);

                if (tmc < 1) {
                        tmc = 1;
                        br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
                        brv = 1;
                } else if (tmc > 255)
                        tmc = 256; /* tmc=0 means 256 - low baud rates */

                port->settings.clock_rate = CLOCK_BASE / brv / tmc;
        } else {
                br = 9; /* Minimum clock rate */
                tmc = 256; /* 8bit = 0 */
                port->settings.clock_rate = CLOCK_BASE / (256 * 512);
        }

        port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
        port->txs = (port->txs & ~CLK_BRG_MASK) | br;
        port->tmc = tmc;

        /* baud divisor - time constant*/
#ifdef __HD64570_H
        sca_out(port->tmc, msci + TMC, card);
#else
        sca_out(port->tmc, msci + TMCR, card);
        sca_out(port->tmc, msci + TMCT, card);
#endif

        /* Set BRG bits */
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;
        else
                md2 &= ~MD2_LOOPBACK;

        sca_out(md2, msci + MD2, card);
}

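/*
 * Bring the MSCI up for HDLC operation: apply the encoding and CRC mode
 * chosen via sca_attach(), program the DMA request thresholds, unmask the
 * MSCI and DMAC interrupts for this channel, and finally enable the
 * transmitter and receiver.
 */
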
static void sca_open(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t* card = port_to_card(port);
        u16 msci = get_msci(port);
        u8 md0, md2;

        switch(port->encoding) {
        case ENCODING_NRZ:      md2 = MD2_NRZ;      break;
        case ENCODING_NRZI:     md2 = MD2_NRZI;     break;
        case ENCODING_FM_MARK:  md2 = MD2_FM_MARK;  break;
        case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
        default:                md2 = MD2_MANCHESTER;
        }

        if (port->settings.loopback)
                md2 |= MD2_LOOPBACK;

        switch(port->parity) {
        case PARITY_CRC16_PR0:       md0 = MD0_HDLC | MD0_CRC_16_0;  break;
        case PARITY_CRC16_PR1:       md0 = MD0_HDLC | MD0_CRC_16;    break;
#ifdef __HD64570_H
        case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
#else
        case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
#endif
        case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
        default:                     md0 = MD0_HDLC | MD0_CRC_NONE;
        }

        sca_out(CMD_RESET, msci + CMD, card);
        sca_out(md0, msci + MD0, card);
        sca_out(0x00, msci + MD1, card); /* no address field check */
        sca_out(md2, msci + MD2, card);
        sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */

#ifdef __HD64570_H
        sca_out(CTL_IDLE, msci + CTL, card);
#else
        /* Skip the rest of underrun frame */
        sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
#endif

#ifdef __HD64570_H
        /* Allow at least 8 bytes before requesting RX DMA operation */
        /* TX with higher priority and possibly with shorter transfers */
        sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
        sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
        sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
#else
        sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
        sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
        sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
        sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
        sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
#endif

        /* We're using the following interrupts:
           - TXINT (DMAC completed all transmissions, underrun or DCD change)
           - all DMA interrupts
        */

        sca_set_carrier(port);

#ifdef __HD64570_H
        /* MSCI TX INT and RX INT A IRQ enable */
        sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
        sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
        sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
                IER0, card); /* TXINT and RXINT */
        /* enable DMA IRQ */
        sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
                IER1, card);
#else
        /* MSCI TXINT and RXINTA interrupt enable */
        sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
                 card);
        /* DMA & MSCI IRQ enable */
        sca_outl(sca_inl(IER0, card) |
                 (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
#endif

#ifdef __HD64570_H
        sca_out(port->tmc, msci + TMC, card); /* Restore registers */
#else
        sca_out(port->tmc, msci + TMCR, card);
        sca_out(port->tmc, msci + TMCT, card);
#endif
        sca_out(port->rxs, msci + RXS, card);
        sca_out(port->txs, msci + TXS, card);
        sca_out(CMD_TX_ENABLE, msci + CMD, card);
        sca_out(CMD_RX_ENABLE, msci + CMD, card);

        netif_start_queue(dev);
}

static void sca_close(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t* card = port_to_card(port);

        /* reset channel */
        sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
#ifdef __HD64570_H
        /* disable MSCI interrupts */
        sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
                IER0, card);
        /* disable DMA interrupts */
        sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
                IER1, card);
#else
        /* disable DMA & MSCI IRQ */
        sca_outl(sca_inl(IER0, card) &
                 (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
#endif
        netif_stop_queue(dev);
}

static int sca_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
        if (encoding != ENCODING_NRZ &&
            encoding != ENCODING_NRZI &&
            encoding != ENCODING_FM_MARK &&
            encoding != ENCODING_FM_SPACE &&
            encoding != ENCODING_MANCHESTER)
                return -EINVAL;

        if (parity != PARITY_NONE &&
            parity != PARITY_CRC16_PR0 &&
            parity != PARITY_CRC16_PR1 &&
#ifdef __HD64570_H
            parity != PARITY_CRC16_PR0_CCITT &&
#else
            parity != PARITY_CRC32_PR1_CCITT &&
#endif
            parity != PARITY_CRC16_PR1_CCITT)
                return -EINVAL;

        dev_to_port(dev)->encoding = encoding;
        dev_to_port(dev)->parity = parity;
        return 0;
}

#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);
        u16 cnt;
#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        u8 page;
#endif

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        page = sca_get_page(card);
        openwin(card, 0);
#endif

        printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
               sca_ina(get_dmac_rx(port) + CDAL, card),
               sca_ina(get_dmac_rx(port) + EDAL, card),
               sca_in(DSR_RX(phy_node(port)), card), port->rxin,
               sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
        for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
                printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));

        printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
               "last=%u %sactive",
               sca_ina(get_dmac_tx(port) + CDAL, card),
               sca_ina(get_dmac_tx(port) + EDAL, card),
               sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
               sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");

        for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
                printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
        printk("\n");

        printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
               "ST: %02x %02x %02x %02x"
#ifdef __HD64572_H
               " %02x"
#endif
               ", FST: %02x CST: %02x %02x\n",
               sca_in(get_msci(port) + MD0, card),
               sca_in(get_msci(port) + MD1, card),
               sca_in(get_msci(port) + MD2, card),
               sca_in(get_msci(port) + ST0, card),
               sca_in(get_msci(port) + ST1, card),
               sca_in(get_msci(port) + ST2, card),
               sca_in(get_msci(port) + ST3, card),
#ifdef __HD64572_H
               sca_in(get_msci(port) + ST4, card),
#endif
               sca_in(get_msci(port) + FST, card),
               sca_in(get_msci(port) + CST0, card),
               sca_in(get_msci(port) + CST1, card));

#ifdef __HD64572_H
        printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
               sca_inl(ISR0, card), sca_inl(ISR1, card));
#else
        printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
               sca_in(ISR1, card), sca_in(ISR2, card));
#endif

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */

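/*
 * Hard-start transmit.  The ring always keeps one spare descriptor between
 * the driver and the DMAC, so a non-zero status byte at "txin + 1" means the
 * ring is full and the queue is stopped.  Otherwise the frame is copied into
 * card RAM (possibly across a window boundary), its descriptor is marked
 * ST_TX_EOM, EDAL is advanced past it and TX DMA is (re)enabled.
 */
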
static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
        port_t *port = dev_to_port(dev);
        card_t *card = port_to_card(port);
        pkt_desc __iomem *desc;
        u32 buff, len;
#ifndef ALL_PAGES_ALWAYS_MAPPED
        u8 page;
        u32 maxlen;
#endif

        spin_lock_irq(&port->lock);

        desc = desc_address(port, port->txin + 1, 1);
        if (readb(&desc->stat)) { /* allow 1 packet gap */
                /* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
                netif_stop_queue(dev);
                spin_unlock_irq(&port->lock);
                return 1;       /* request packet to be queued */
        }

#ifdef DEBUG_PKT
        printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
        debug_frame(skb);
#endif

        desc = desc_address(port, port->txin, 1);
        buff = buffer_offset(port, port->txin, 1);
        len = skb->len;
#ifndef ALL_PAGES_ALWAYS_MAPPED
        page = buff / winsize(card);
        buff = buff % winsize(card);
        maxlen = winsize(card) - buff;

        openwin(card, page);
        if (len > maxlen) {
                memcpy_toio(winbase(card) + buff, skb->data, maxlen);
                openwin(card, page + 1);
                memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
        }
        else
#endif
                memcpy_toio(winbase(card) + buff, skb->data, len);

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
        openwin(card, 0); /* select pkt_desc table page back */
#endif
        writew(len, &desc->len);
        writeb(ST_TX_EOM, &desc->stat);
        dev->trans_start = jiffies;

        port->txin = next_desc(port, port->txin, 1);
        sca_outa(desc_offset(port, port->txin, 1),
                 get_dmac_tx(port) + EDAL, card);

        sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */

        desc = desc_address(port, port->txin + 1, 1);
        if (readb(&desc->stat)) /* allow 1 packet gap */
                netif_stop_queue(dev);

        spin_unlock_irq(&port->lock);

        dev_kfree_skb(skb);
        return 0;
}

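/*
 * RAM size probe, used by card drivers that define NEED_DETECT_RAM: write a
 * distinctive pattern (offset XOR 0x12345678) to every 32-bit word from the
 * top of the memory down, then verify from the bottom up; the first mismatch
 * marks the end of usable RAM and the number of good bytes is returned.
 */
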
#ifdef NEED_DETECT_RAM
static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
                                    u32 ramsize)
{
        /* Round RAM size to 32 bits, fill from end to start */
        u32 i = ramsize &= ~3;
#ifndef ALL_PAGES_ALWAYS_MAPPED
        u32 size = winsize(card);

        openwin(card, (i - 4) / size); /* select last window */
#endif
        do {
                i -= 4;
#ifndef ALL_PAGES_ALWAYS_MAPPED
                if ((i + 4) % size == 0)
                        openwin(card, i / size);
                writel(i ^ 0x12345678, rambase + i % size);
#else
                writel(i ^ 0x12345678, rambase + i);
#endif
        } while (i > 0);

        for (i = 0; i < ramsize ; i += 4) {
#ifndef ALL_PAGES_ALWAYS_MAPPED
                if (i % size == 0)
                        openwin(card, i / size);

                if (readl(rambase + i % size) != (i ^ 0x12345678))
                        break;
#else
                if (readl(rambase + i) != (i ^ 0x12345678))
                        break;
#endif
        }

        return i;
}
#endif /* NEED_DETECT_RAM */

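/*
 * One-time global SCA setup: program the bus wait states, disable the DMA
 * master, set DMA priority, put all four DMA channels into the halt state,
 * and only then re-enable the DMA master.
 */
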
static void __devinit sca_init(card_t *card, int wait_states)
{
        sca_out(wait_states, WCRL, card); /* Wait Control */
        sca_out(wait_states, WCRM, card);
        sca_out(wait_states, WCRH, card);

        sca_out(0, DMER, card);         /* DMA Master disable */
        sca_out(0x03, PCR, card);       /* DMA priority */
        sca_out(0, DSR_RX(0), card);    /* DMA disable - to halt state */
        sca_out(0, DSR_TX(0), card);
        sca_out(0, DSR_RX(1), card);
        sca_out(0, DSR_TX(1), card);
        sca_out(DMER_DME, DMER, card);  /* DMA Master enable */
}