/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *			Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/*
 *		Transmit/Receive Ring Definitions
 */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1<<LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1<<LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE-1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE-1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE
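
/*
 * Ring geometry: 1<<2 = 4 transmit and 1<<4 = 16 receive buffers of
 * 1544 bytes each.  Both the descriptor rings and the packet buffers live
 * in the board's own RAM, laid out by struct lance_init_block below.
 */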

/*
 *		Layout of the Lance's RAM Buffer
 */

struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/*
 *		Private Device Data
 */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	    /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* Lance's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	struct net_device_stats stats;
	int tpe;		/* cable-selection is TPE */
	int auto_select;	/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
};
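
/*
 * TX_BUFFS_AVAIL is the number of free transmit descriptors between tx_old
 * (oldest not yet reclaimed) and tx_new (next slot to use).  LANCE_ADDR
 * masks off the upper address byte, keeping the low 24 address bits the
 * LANCE actually works with.
 */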
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)

#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

/* Load the CSR registers */
static void load_csrs (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR (aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}

#define ZERO 0

/* Setup the Lance Rx and Tx rings */
static void lance_init_ring (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib;	/* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
	ib->phys_addr [0] = dev->dev_addr [1];
	ib->phys_addr [1] = dev->dev_addr [0];
	ib->phys_addr [2] = dev->dev_addr [3];
	ib->phys_addr [3] = dev->dev_addr [2];
	ib->phys_addr [4] = dev->dev_addr [5];
	ib->phys_addr [5] = dev->dev_addr [4];

	if (ZERO)
		printk(KERN_DEBUG "TX rings:\n");

	/* Setup the Tx ring entries */
	for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring [i].tmd0      = leptr;
		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
		ib->btx_ring [i].tmd1_bits = 0;
		ib->btx_ring [i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring [i].misc      = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (ZERO)
		printk(KERN_DEBUG "RX rings:\n");
	for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring [i].rmd0      = leptr;
		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring [i].mblength  = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;
}
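
/*
 * Tell the LANCE to read the initialization block (CSR0 INIT), busy-wait
 * for IDON or an error, then acknowledge IDON and start the chip with
 * interrupts enabled.
 */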
static int init_restart_lance (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}
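
/*
 * Receive handler: walk the RX ring from rx_new, copy each completed frame
 * into a freshly allocated skb and hand it to the network stack, then give
 * the descriptor back to the LANCE by setting LE_R1_OWN again.
 */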
static int lance_rx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
	int len = 0;			/* XXX shut up gcc warnings */
	struct sk_buff *skb = 0;	/* XXX shut up gcc warnings */

#ifdef TEST_HITS
	int i;
	printk(KERN_DEBUG "[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk ("]\n");
#endif

	ll->rdp = LE_C0_RINT|LE_C0_INEA;
	for (rd = &ib->brx_ring [lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			lp->stats.rx_over_errors++;
			lp->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) lp->stats.rx_errors++;
		} else {
			len = (rd->mblength & 0xfff) - 4;
			skb = dev_alloc_skb (len+2);

			if (skb == 0) {
				printk(KERN_WARNING "%s: Memory squeeze, "
				       "deferring packet.\n", dev->name);
				lp->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb->dev = dev;
			skb_reserve (skb, 2);		/* 16 byte align */
			skb_put (skb, len);		/* make room */
			eth_copy_and_sum(skb,
					 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
					 len, 0);
			skb->protocol = eth_type_trans (skb, dev);
			netif_rx (skb);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
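
/*
 * Transmit completion handler: reclaim descriptors between tx_old and
 * tx_new, update the statistics and, on fatal errors (carrier loss with
 * automatic cable selection, buffer error or FIFO underflow), stop the
 * chip and reinitialise it.
 */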
static int lance_tx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring [i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			lp->stats.tx_errors++;
			if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				lp->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk(KERN_ERR "%s: Carrier Lost, "
					       "trying %s\n", dev->name,
					       lp->tpe?"TPE":"AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring (dev);
					load_csrs (lp);
					init_restart_lance (lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				lp->stats.tx_fifo_errors++;

				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
				       "restarting\n", dev->name);
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring (dev);
				load_csrs (lp);
				init_restart_lance (lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				lp->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				lp->stats.collisions += 2;

			lp->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
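
/*
 * Interrupt handler: read CSR0, acknowledge all pending interrupt sources,
 * dispatch to lance_rx()/lance_tx(), log babble/miss/memory errors and wake
 * the transmit queue once descriptors have become available again.
 */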
static irqreturn_t
lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev;
	struct lance_private *lp;
	volatile struct lance_regs *ll;
	int csr0;

	dev = (struct net_device *) dev_id;

	lp = netdev_priv(dev);
	ll = lp->ll;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|
			   LE_C0_INIT);

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx (dev);

	if (csr0 & LE_C0_TINT)
		lance_tx (dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		lp->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		lp->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk(KERN_ERR "%s: Bus master arbitration failure, status "
		       "%4.4x.\n", dev->name, csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|
		  LE_C0_IDON|LE_C0_INEA;
	return IRQ_HANDLED;
}

struct net_device *last_dev = 0;
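
/*
 * Open: stop the chip, install the shared IRQ_AMIGA_PORTS interrupt handler,
 * load the CSRs, set up the rings and (re)start the LANCE.  Close undoes
 * this and cancels any pending multicast timer.
 */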
static int lance_open (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	last_dev = dev;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs (lp);
	lance_init_ring (dev);

	netif_start_queue(dev);

	return init_restart_lance (lp);
}

static int lance_close (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs (lp);
	lance_init_ring (dev);
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
	printk(KERN_DEBUG "Lance restart=%d\n", status);
#endif
	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
	       dev->name, ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}
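
/*
 * Hard transmit: pad short frames to ETH_ZLEN, copy the packet into the
 * next free TX buffer in board RAM, hand the descriptor to the LANCE
 * (LE_T1_OWN) and kick transmission with TDMD.
 */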
static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	int status = 0;
	static int outs;
	unsigned long flags;

	skblen = skb->len;
	len = skblen;

	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
	}

	local_irq_save(flags);

	if (!TX_BUFFS_AVAIL) {
		local_irq_restore(flags);
		return -1;
	}

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n" KERN_DEBUG);
			printk ("%2.2x ", skb->data [i]);
		}
		printk("\n");
	}
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring [entry].length = (-len) | 0xf000;
	ib->btx_ring [entry].misc = 0;

	memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);

	/* Clear the slack of the packet, do I need this? */
	if (len != skblen)
		memset ((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	outs++;

	if (TX_BUFFS_AVAIL <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	local_irq_restore(flags);

	return status;
}

static struct net_device_stats *lance_get_stats (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	return &lp->stats;
}
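
/*
 * Multicast filtering uses the LANCE's 64-bit logical address filter: each
 * multicast address is hashed with CRC-32 and the top 6 bits of the CRC
 * select the bit to set in ib->filter.
 */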
/* taken from the depca driver */
static void lance_load_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter [0] = 0xffffffff;
		ib->filter [1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++) {
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table [crc >> 4] |= 1 << (crc & 0xf);
	}
	return;
}

static void lance_set_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring (dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast (dev);
	}
	load_csrs (lp);
	init_restart_lance (lp);
	netif_wake_queue(dev);
}

static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent);
static void __devexit a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= __devexit_p(a2065_remove_one),
};
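
/*
 * Probe a matched Zorro board: claim the register and RAM regions, derive
 * the Ethernet address from the manufacturer prefix (Commodore or
 * Ameristar) and the board's serial number, fill in the net_device
 * callbacks and register the interface.
 */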
static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board, base_addr, mem_start;
	struct resource *r1, *r2;
	int err;

	board = z->resource.start;
	base_addr = board+A2065_LANCE;
	mem_start = board+A2065_RAM;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_resource(r1);
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_resource(r1);
		release_resource(r2);
		return -ENOMEM;
	}

	SET_MODULE_OWNER(dev);
	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
	dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
	dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
	dev->base_addr = ZTWO_VADDR(base_addr);
	dev->mem_start = ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start+A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->open = &lance_open;
	dev->stop = &lance_close;
	dev->hard_start_xmit = &lance_start_xmit;
	dev->tx_timeout = &lance_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->get_stats = &lance_get_stats;
	dev->set_multicast_list = &lance_set_multicast;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long) dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long)) &lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_resource(r1);
		release_resource(r2);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board,
	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	return 0;
}


static void __devexit a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");