/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2065 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
/* Used for the temporary inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"
#define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)

#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x)	(lp->writerap(lp, x))
#define WRITERDP(lp, x)	(lp->writerdp(lp, x))
#define READRDP(lp)	(lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;

	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
/* debugging output macros, various flavours */
/* #define TEST_HITS */

#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t = 0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
		       ib->brx_ring[t].length, \
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
	} \
	for (t = 0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
		       ib->btx_ring[t].length, \
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
	} \
} while (0)
#else
#define PRINT_RINGS()
#endif
/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);			/* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);	/* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}
/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
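
/*
 * Ring/init-block layout notes: buffer lengths are written as negative
 * (two's complement) values with the top four bits set, as the chip
 * requires; receive descriptors are handed to the LANCE by setting
 * LE_R1_OWN, while transmit descriptors start out owned by the host.
 */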
/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib;	/* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;
	ib->mode = LE_MO_PROM;		/* Tx & Rx enabled; note the PROM bit selects promiscuous receive */
	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;

	/* Setup the Tx ring entries */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000;	/* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}
/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE initialization not completed after %d ticks, csr0=%4.4x.\n",
		       i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	dev->trans_start = jiffies;
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}
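
/*
 * Receive path: called from the interrupt handler once LE_C0_RINT is pending.
 * Walk the receive ring from rx_new, handling every descriptor the host now
 * owns (LE_R1_OWN clear): count error frames, copy good frames into a freshly
 * allocated skb and hand them to netif_rx(), then give the buffer back to the
 * LANCE by setting LE_R1_OWN again and advancing rx_new.
 */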
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif

	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);	/* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring[lp->rx_new];	/* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = dev_alloc_skb(len + 2);

			if (!skb) {
				printk("%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, len);	/* make room */
			skb_copy_to_linear_data(skb,
					(unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
					len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
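
/*
 * Transmit completion: called from the interrupt handler once LE_C0_TINT is
 * pending. Reclaim descriptors from tx_old up to (but not including) tx_new,
 * stopping early if the LANCE still owns one. Collision and error statistics
 * are recorded along the way; buffer/underflow errors shut the transmitter
 * down, so the chip is reinitialised and restarted in that case.
 */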
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}
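
/*
 * Interrupt handler: read CSR0, return IRQ_NONE if the LANCE did not raise
 * the interrupt (the line may be shared), acknowledge the pending sources,
 * then dispatch to lance_rx()/lance_tx() as indicated. Babble, missed-frame
 * and memory errors are counted or logged, and the transmit queue is woken
 * again once ring space has been reclaimed.
 */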
static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);		/* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {	/* Check if any interrupt has */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;	/* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR | LE_C0_IDON | LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}
int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* The lock must be usable before the (shared) handler can run */
	spin_lock_init(&lp->devlock);

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	netif_start_queue(dev);

	return res;
}
EXPORT_SYMBOL_GPL(lance_open);
int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);

void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
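
/*
 * Transmit entry point: copy the skb into the next free transmit buffer,
 * zero-pad frames shorter than ETH_ZLEN, hand the descriptor to the LANCE by
 * setting LE_T1_OWN and kick it with LE_C0_TDMD. The queue is stopped for the
 * duration and only restarted if ring space remains afterwards.
 */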
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	if (!TX_BUFFS_AVAIL)
		return -1;

	netif_stop_queue(dev);

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
	outs++;

	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
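
/*
 * The multicast filter is the usual LANCE 64-bit logical address filter:
 * the top six bits of the little-endian CRC-32 of each multicast address
 * select one bit in ib->filter, which the chip checks on reception.
 */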
/* taken from the depca driver via a2065.c */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++) {
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
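
/*
 * Rebuilding the filter requires reinitialising the chip, so the queue is
 * stopped first and in-flight transmits are allowed to drain before the
 * rings are rebuilt with the new mode/filter and the LANCE is restarted.
 */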
void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
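
/*
 * Netpoll hook: write LE_C0_STRT to CSR0 under the device lock, then invoke
 * the interrupt handler directly so pending receive/transmit work is
 * processed without relying on a hardware interrupt.
 */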
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
#endif

MODULE_LICENSE("GPL");