/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800
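
/*
 * Buffer geometry, as implied by the defines above and mace_open():
 * a single 0x800-byte transmit buffer (N_TX_RING == 1) and eight
 * 0x800-byte receive buffers, allocated as coherent DMA memory. The
 * receive side is double-buffered across the two PSC register sets.
 */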

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};
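
/*
 * The receive DMA deposits each frame with this header in front of the
 * data. mace_dma_rx_frame() treats rcvcnt plus the low nibble of rcvsts
 * as a 12-bit byte count, and the remaining rcvsts bits as the receive
 * status flags (RS_OFLO, RS_CLSN, RS_FRAMERR, RS_FCSERR).
 */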

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
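
/*
 * Each PSC Ethernet channel has two register sets, selected by an
 * offset of 0x00 or 0x10 (PSC_SET0/PSC_SET1). The receive reset below
 * loads both sets with the same ring so one can be rearmed while the
 * other runs; rx_slot and tx_slot record which set is in use.
 */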

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_multicast_list	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return -ENODEV;

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = (void *)MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops		= &mace_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}

	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
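
/*
 * Address changes follow the same workaround in mace_reset(),
 * __mace_set_address() and mace_set_multicast(): on the chip revision
 * tagged BROKEN_ADDRCHG_REV the IAC register is written without the
 * ADDRCHG bit, while other revisions set ADDRCHG and busy-wait for the
 * chip to clear it before loading PADR/LADRF.
 */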

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}

	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];

	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}
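
/*
 * There is only the one transmit buffer (N_TX_RING == 1), so the queue
 * is stopped on every transmission and woken again from mace_interrupt()
 * or mace_tx_timeout() once tx_count indicates a free buffer.
 */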

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(dmi, dev) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}

		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;		/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;

	printk(KERN_ERR "macmace: transmit timeout - resetting\n");

	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = dev_alloc_skb(frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}
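
/*
 * The receive LEN register is loaded with N_RX_RING and appears to
 * count down as buffers are filled, so N_RX_RING minus the value read
 * back marks the index just past the last completed buffer. rx_tail
 * walks from the last processed buffer up to that point; once the ring
 * is exhausted the base is reloaded and the other register set takes
 * over.
 */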

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name	= mac_mace_string,
		.owner	= THIS_MODULE,
	},
};

static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);