
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng <edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"D-Link DL2000-based linux driver"
#define DRV_VERSION	"v1.18"
#define DRV_RELDATE	"2006/06/27"

#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
        KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";

#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;	/* Rx frame count each interrupt */
static int rx_timeout = 200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;	/* HW xmit count each TxDMAComplete */

MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */

/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
        UpdateStats | LinkEvent)
#define EnableInt() \
        writew(DEFAULT_INTR, ioaddr + IntEnable)

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

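/*
 * Illustrative module load (assumed invocation, not taken from the original
 * sources): one value per adapter, matching the module_param entries above,
 * e.g.
 *
 *     insmod dl2k.ko media=autosense,100mbps_fd jumbo=0,1 tx_coalesce=16
 */
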
static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
                      u16 data);

static const struct ethtool_ops ethtool_ops;

static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int err, irq;
        long ioaddr;
        static int version_printed;
        void *ring_space;
        dma_addr_t ring_dma;

        if (!version_printed++)
                printk ("%s", version);

        err = pci_enable_device (pdev);
        if (err)
                return err;

        irq = pdev->irq;
        err = pci_request_regions (pdev, "dl2k");
        if (err)
                goto err_out_disable;

        pci_set_master (pdev);
        dev = alloc_etherdev (sizeof (*np));
        if (!dev) {
                err = -ENOMEM;
                goto err_out_res;
        }
        SET_MODULE_OWNER (dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
        ioaddr = pci_resource_start (pdev, 1);
        ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
        if (!ioaddr) {
                err = -ENOMEM;
                goto err_out_dev;
        }
#else
        ioaddr = pci_resource_start (pdev, 0);
#endif
        dev->base_addr = ioaddr;
        dev->irq = irq;
        np = netdev_priv(dev);
        np->chip_id = chip_idx;
        np->pdev = pdev;
        spin_lock_init (&np->tx_lock);
        spin_lock_init (&np->rx_lock);

        /* Parse manual configuration */
        np->an_enable = 1;
        np->tx_coalesce = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "auto") == 0 ||
                            strcmp (media[card_idx], "autosense") == 0 ||
                            strcmp (media[card_idx], "0") == 0) {
                                np->an_enable = 2;
                        } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "6") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "5") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (jumbo[card_idx] != 0) {
                        np->jumbo = 1;
                        dev->mtu = MAX_JUMBO;
                } else {
                        np->jumbo = 0;
                        if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
                                dev->mtu = mtu[card_idx];
                }
                np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
                        vlan[card_idx] : 0;
                if (rx_coalesce > 0 && rx_timeout > 0) {
                        np->rx_coalesce = rx_coalesce;
                        np->rx_timeout = rx_timeout;
                        np->coalesce = 1;
                }
                np->tx_flow = (tx_flow == 0) ? 0 : 1;
                np->rx_flow = (rx_flow == 0) ? 0 : 1;

                if (tx_coalesce < 1)
                        tx_coalesce = 1;
                else if (tx_coalesce > TX_RING_SIZE - 1)
                        tx_coalesce = TX_RING_SIZE - 1;
        }
        dev->open = &rio_open;
        dev->hard_start_xmit = &start_xmit;
        dev->stop = &rio_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_multicast;
        dev->do_ioctl = &rio_ioctl;
        dev->tx_timeout = &rio_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->change_mtu = &change_mtu;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
        dev->features = NETIF_F_IP_CSUM;
#endif
        pci_set_drvdata (pdev, dev);

        ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_iounmap;
        np->tx_ring = (struct netdev_desc *) ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *) ring_space;
        np->rx_ring_dma = ring_dma;

        /* Parse eeprom data */
        parse_eeprom (dev);

        /* Find PHY address */
        err = find_miiphy (dev);
        if (err)
                goto err_out_unmap_rx;

        /* Fiber device? */
        np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
        np->link_status = 0;
        /* Set media and reset PHY */
        if (np->phy_media) {
                /* default Auto-Negotiation for fiber devices */
                if (np->an_enable == 2) {
                        np->an_enable = 1;
                }
                mii_set_media_pcs (dev);
        } else {
                /* Auto-Negotiation is mandatory for 1000BASE-T,
                   IEEE 802.3ab Annex 28D page 14 */
                if (np->speed == 1000)
                        np->an_enable = 1;
                mii_set_media (dev);
        }

        err = register_netdev (dev);
        if (err)
                goto err_out_unmap_rx;

        card_idx++;

        printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
                dev->name, np->name,
                dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
                dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
        if (tx_coalesce > 1)
                printk(KERN_INFO "tx_coalesce:\t%d packets\n",
                       tx_coalesce);
        if (np->coalesce)
                printk(KERN_INFO "rx_coalesce:\t%d packets\n"
                       KERN_INFO "rx_timeout: \t%d ns\n",
                       np->rx_coalesce, np->rx_timeout * 640);
        if (np->vlan)
                printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
        return 0;

err_out_unmap_rx:
        pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
        iounmap ((void *) ioaddr);
err_out_dev:
#endif
        free_netdev (dev);
err_out_res:
        pci_release_regions (pdev);
err_out_disable:
        pci_disable_device (pdev);
        return err;
}

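/*
 * Scan all 32 MII addresses for a PHY that answers register 1 (BMSR) with
 * something other than all-ones or all-zeros; because the loop counts down
 * from 31, the lowest responding address ends up in np->phy_addr.
 */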
int
find_miiphy (struct net_device *dev)
{
        int i, phy_found = 0;
        struct netdev_private *np;
        long ioaddr;
        np = netdev_priv(dev);
        ioaddr = dev->base_addr;

        np->phy_addr = 1;

        for (i = 31; i >= 0; i--) {
                int mii_status = mii_read (dev, i, 1);
                if (mii_status != 0xffff && mii_status != 0x0000) {
                        np->phy_addr = i;
                        phy_found++;
                }
        }
        if (!phy_found) {
                printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
                return -ENODEV;
        }
        return 0;
}

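/*
 * Read the 256-byte SROM, verify its CRC, copy out the station address and
 * then walk the Software Information Block: each cell carries a one-byte id
 * followed by the offset of the next cell, and cell id 1 ends the list.
 */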
int
parse_eeprom (struct net_device *dev)
{
        int i, j;
        long ioaddr = dev->base_addr;
        u8 sromdata[256];
        u8 *psib;
        u32 crc;
        PSROM_t psrom = (PSROM_t) sromdata;
        struct netdev_private *np = netdev_priv(dev);

        int cid, next;

#ifdef MEM_MAPPING
        ioaddr = pci_resource_start (np->pdev, 0);
#endif
        /* Read eeprom */
        for (i = 0; i < 128; i++) {
                ((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
        }
#ifdef MEM_MAPPING
        ioaddr = dev->base_addr;
#endif
        /* Check CRC */
        crc = ~ether_crc_le (256 - 4, sromdata);
        if (psrom->crc != crc) {
                printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
                return -1;
        }

        /* Set MAC address */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = psrom->mac_addr[i];

        /* Parse Software Information Block */
        i = 0x30;
        psib = (u8 *) sromdata;
        do {
                cid = psib[i++];
                next = psib[i++];
                if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
                        printk (KERN_ERR "Cell data error\n");
                        return -1;
                }
                switch (cid) {
                case 0:	/* Format version */
                        break;
                case 1:	/* End of cell */
                        return 0;
                case 2:	/* Duplex Polarity */
                        np->duplex_polarity = psib[i];
                        writeb (readb (ioaddr + PhyCtrl) | psib[i],
                                ioaddr + PhyCtrl);
                        break;
                case 3:	/* Wake Polarity */
                        np->wake_polarity = psib[i];
                        break;
                case 9:	/* Adapter description */
                        j = (next - i > 255) ? 255 : next - i;
                        memcpy (np->name, &(psib[i]), j);
                        break;
                case 4:
                case 5:
                case 6:
                case 7:
                case 8:	/* Reserved */
                        break;
                default:	/* Unknown cell */
                        return -1;
                }
                i = next;
        } while (1);

        return 0;
}

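/*
 * Bring the adapter up: hook the shared interrupt, reset the ASIC, program
 * the station address, descriptor rings, coalescing and VLAN registers, then
 * enable Tx/Rx and the default interrupt set.
 */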
static int
rio_open (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        long ioaddr = dev->base_addr;
        int i;
        u16 macctrl;

        i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;

        /* Reset all logic functions */
        writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
                ioaddr + ASICCtrl + 2);
        mdelay(10);

        /* DebugCtrl bits 4, 5 and 9 must be set */
        writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

        /* Jumbo frame */
        if (np->jumbo != 0)
                writew (MAX_JUMBO + 14, ioaddr + MaxFrameSize);

        alloc_list (dev);

        /* Get station address */
        for (i = 0; i < 6; i++)
                writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

        set_multicast (dev);
        if (np->coalesce) {
                writel (np->rx_coalesce | np->rx_timeout << 16,
                        ioaddr + RxDMAIntCtrl);
        }
        /* Set RIO to poll every N*320nsec. */
        writeb (0x20, ioaddr + RxDMAPollPeriod);
        writeb (0xff, ioaddr + TxDMAPollPeriod);
        writeb (0x30, ioaddr + RxDMABurstThresh);
        writeb (0x30, ioaddr + RxDMAUrgentThresh);
        writel (0x0007ffff, ioaddr + RmonStatMask);
        /* clear statistics */
        clear_stats (dev);

        /* VLAN supported */
        if (np->vlan) {
                /* priority field in RxDMAIntCtrl */
                writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
                        ioaddr + RxDMAIntCtrl);
                /* VLANId */
                writew (np->vlan, ioaddr + VLANId);
                /* Length/Type should be 0x8100 */
                writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
                /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
                   VLAN information tagged by TFC's VID, CFI fields. */
                writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
                        ioaddr + MACCtrl);
        }

        init_timer (&np->timer);
        np->timer.expires = jiffies + 1*HZ;
        np->timer.data = (unsigned long) dev;
        np->timer.function = &rio_timer;
        add_timer (&np->timer);

        /* Start Tx/Rx */
        writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
                ioaddr + MACCtrl);

        macctrl = 0;
        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
        macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
        macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
        writew(macctrl, ioaddr + MACCtrl);

        netif_start_queue (dev);

        /* Enable default interrupts */
        EnableInt ();
        return 0;
}

static void
rio_timer (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        int next_tick = 1*HZ;
        unsigned long flags;

        spin_lock_irqsave(&np->rx_lock, flags);
        /* Recover rx ring exhausted error */
        if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
                printk(KERN_INFO "Try to recover rx ring exhausted...\n");
                /* Re-allocate skbuffs to fill the descriptor ring */
                for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
                        struct sk_buff *skb;
                        entry = np->old_rx % RX_RING_SIZE;
                        /* Dropped packets don't need to re-allocate */
                        if (np->rx_skbuff[entry] == NULL) {
                                skb = dev_alloc_skb (np->rx_buf_sz);
                                if (skb == NULL) {
                                        np->rx_ring[entry].fraginfo = 0;
                                        printk (KERN_INFO
                                                "%s: Still unable to re-allocate Rx skbuff.#%d\n",
                                                dev->name, entry);
                                        break;
                                }
                                np->rx_skbuff[entry] = skb;
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                np->rx_ring[entry].fraginfo =
                                        cpu_to_le64 (pci_map_single
                                                     (np->pdev, skb->data, np->rx_buf_sz,
                                                      PCI_DMA_FROMDEVICE));
                        }
                        np->rx_ring[entry].fraginfo |=
                                cpu_to_le64 (np->rx_buf_sz) << 48;
                        np->rx_ring[entry].status = 0;
                } /* end for */
        } /* end if */
        spin_unlock_irqrestore (&np->rx_lock, flags);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
        long ioaddr = dev->base_addr;

        printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
                dev->name, readl (ioaddr + TxStatus));
        rio_free_tx(dev, 0);
        dev->if_port = 0;
        dev->trans_start = jiffies;
}

/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->old_rx = np->old_tx = 0;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

        /* Initialize Tx descriptors; TFDListPtr is set later in start_xmit(). */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = cpu_to_le64 (TFDDone);
                np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
                                                ((i + 1) % TX_RING_SIZE) *
                                                sizeof (struct netdev_desc));
        }

        /* Initialize Rx descriptors */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
                                                ((i + 1) % RX_RING_SIZE) *
                                                sizeof (struct netdev_desc));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* Allocate the rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* Allocated fixed size of skbuff */
                struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL) {
                        printk (KERN_ERR
                                "%s: alloc_list: allocate Rx buffer error! ",
                                dev->name);
                        break;
                }
                skb_reserve (skb, 2);	/* 16 byte align the IP header. */
                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                        cpu_to_le64 (pci_map_single (
                                     np->pdev, skb->data, np->rx_buf_sz,
                                     PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
        }

        /* Set RFDListPtr */
        writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
        writel (0, dev->base_addr + RFDListPtr1);

        return;
}

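/*
 * Queue one frame: map the skb for DMA, build a single-fragment TFD (with
 * optional VLAN tag insertion), kick TxDMAPollNow, arm the CountDown
 * interrupt, and point TFDListPtr0 at the ring on first use.
 */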
static int
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct netdev_desc *txdesc;
        unsigned entry;
        u32 ioaddr;
        u64 tfc_vlan_tag = 0;

        if (np->link_status == 0) {	/* Link Down */
                dev_kfree_skb(skb);
                return 0;
        }
        ioaddr = dev->base_addr;
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

#if 0
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdesc->status |=
                        cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
                                     IPChecksumEnable);
        }
#endif
        if (np->vlan) {
                tfc_vlan_tag =
                        cpu_to_le64 (VLANTagInsert) |
                        (cpu_to_le64 (np->vlan) << 32) |
                        (cpu_to_le64 (skb->priority) << 45);
        }
        txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
                                                        skb->len,
                                                        PCI_DMA_TODEVICE));
        txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;

        /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
         * Work around: Always use 1 descriptor in 10Mbps mode */
        if (entry % np->tx_coalesce == 0 || np->speed == 10)
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              TxDMAIndicate |
                                              (1 << FragCountShift));
        else
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              (1 << FragCountShift));

        /* TxDMAPollNow */
        writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
        /* Schedule ISR */
        writel(10000, ioaddr + CountDown);
        np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
        if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
                        < TX_QUEUE_LEN - 1 && np->speed != 10) {
                /* do nothing */
        } else if (!netif_queue_stopped(dev)) {
                netif_stop_queue (dev);
        }

        /* The first TFDListPtr */
        if (readl (dev->base_addr + TFDListPtr0) == 0) {
                writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
                        dev->base_addr + TFDListPtr0);
                writel (0, dev->base_addr + TFDListPtr1);
        }

        /* NETDEV WATCHDOG timer */
        dev->trans_start = jiffies;
        return 0;
}

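/*
 * Interrupt handler: acknowledge and latch IntStatus, loop at most
 * max_intrloop times servicing Rx completions, Tx completions/errors and
 * the uncommon events, then re-arm CountDown if Tx work is still pending.
 */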
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np;
        unsigned int_status;
        long ioaddr;
        int cnt = max_intrloop;
        int handled = 0;

        ioaddr = dev->base_addr;
        np = netdev_priv(dev);
        while (1) {
                int_status = readw (ioaddr + IntStatus);
                writew (int_status, ioaddr + IntStatus);
                int_status &= DEFAULT_INTR;
                if (int_status == 0 || --cnt < 0)
                        break;
                handled = 1;
                /* Processing received packets */
                if (int_status & RxDMAComplete)
                        receive_packet (dev);
                /* TxDMAComplete interrupt */
                if ((int_status & (TxDMAComplete | IntRequested))) {
                        int tx_status;
                        tx_status = readl (ioaddr + TxStatus);
                        if (tx_status & 0x01)
                                tx_error (dev, tx_status);
                        /* Free used tx skbuffs */
                        rio_free_tx (dev, 1);
                }
                /* Handle uncommon events */
                if (int_status &
                    (HostError | LinkEvent | UpdateStats))
                        rio_error (dev, int_status);
        }
        if (np->cur_tx != np->old_tx)
                writel (100, ioaddr + CountDown);
        return IRQ_RETVAL(handled);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->old_tx % TX_RING_SIZE;
        int tx_use = 0;
        unsigned long flag = 0;

        if (irq)
                spin_lock(&np->tx_lock);
        else
                spin_lock_irqsave(&np->tx_lock, flag);

        /* Free used tx skbuffs */
        while (entry != np->cur_tx) {
                struct sk_buff *skb;

                if (!(np->tx_ring[entry].status & TFDDone))
                        break;
                skb = np->tx_skbuff[entry];
                pci_unmap_single (np->pdev,
                                  np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
                                  skb->len, PCI_DMA_TODEVICE);
                if (irq)
                        dev_kfree_skb_irq (skb);
                else
                        dev_kfree_skb (skb);

                np->tx_skbuff[entry] = NULL;
                entry = (entry + 1) % TX_RING_SIZE;
                tx_use++;
        }
        if (irq)
                spin_unlock(&np->tx_lock);
        else
                spin_unlock_irqrestore(&np->tx_lock, flag);
        np->old_tx = entry;

        /* If the ring is no longer full, clear tx_full and
           call netif_wake_queue() */
        if (netif_queue_stopped(dev) &&
            ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
            < TX_QUEUE_LEN - 1 || np->speed == 10)) {
                netif_wake_queue (dev);
        }
}

static void
tx_error (struct net_device *dev, int tx_status)
{
        struct netdev_private *np;
        long ioaddr = dev->base_addr;
        int frame_id;
        int i;

        np = netdev_priv(dev);

        frame_id = (tx_status & 0xffff0000);
        printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
                dev->name, tx_status, frame_id);
        np->stats.tx_errors++;
        /* Transmit Underrun */
        if (tx_status & 0x10) {
                np->stats.tx_fifo_errors++;
                writew (readw (ioaddr + TxStartThresh) + 0x10,
                        ioaddr + TxStartThresh);
                /* Transmit Underrun needs TxReset, DMAReset, FIFOReset */
                writew (TxReset | DMAReset | FIFOReset | NetworkReset,
                        ioaddr + ASICCtrl + 2);
                /* Wait for ResetBusy bit clear */
                for (i = 50; i > 0; i--) {
                        if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
                                break;
                        mdelay (1);
                }
                rio_free_tx (dev, 1);
                /* Reset TFDListPtr */
                writel (np->tx_ring_dma +
                        np->old_tx * sizeof (struct netdev_desc),
                        dev->base_addr + TFDListPtr0);
                writel (0, dev->base_addr + TFDListPtr1);

                /* Let TxStartThresh stay default value */
        }
        /* Late Collision */
        if (tx_status & 0x04) {
                np->stats.tx_fifo_errors++;
                /* TxReset and clear FIFO */
                writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
                /* Wait reset done */
                for (i = 50; i > 0; i--) {
                        if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
                                break;
                        mdelay (1);
                }
                /* Let TxStartThresh stay default value */
        }
        /* Maximum Collisions */
#ifdef ETHER_STATS
        if (tx_status & 0x08)
                np->stats.collisions16++;
#else
        if (tx_status & 0x08)
                np->stats.collisions++;
#endif
        /* Restart the Tx */
        writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

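/*
 * Rx path: for each completed RFD, either hand the mapped buffer straight up
 * the stack (large frames) or copy short frames (<= copy_thresh) into a
 * freshly allocated skb, then refill the ring from old_rx up to cur_rx.
 */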
static int
receive_packet (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int cnt = 30;

        /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
        while (1) {
                struct netdev_desc *desc = &np->rx_ring[entry];
                int pkt_len;
                u64 frame_status;

                if (!(desc->status & RFDDone) ||
                    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
                        break;

                /* Chip omits the CRC. */
                pkt_len = le64_to_cpu (desc->status & 0xffff);
                frame_status = le64_to_cpu (desc->status);
                if (--cnt < 0)
                        break;
                /* Update rx error statistics, drop packet. */
                if (frame_status & RFS_Errors) {
                        np->stats.rx_errors++;
                        if (frame_status & (RxRuntFrame | RxLengthError))
                                np->stats.rx_length_errors++;
                        if (frame_status & RxFCSError)
                                np->stats.rx_crc_errors++;
                        if (frame_status & RxAlignmentError && np->speed != 1000)
                                np->stats.rx_frame_errors++;
                        if (frame_status & RxFIFOOverrun)
                                np->stats.rx_fifo_errors++;
                } else {
                        struct sk_buff *skb;

                        /* Small skbuffs for short packets */
                        if (pkt_len > copy_thresh) {
                                pci_unmap_single (np->pdev,
                                                  desc->fraginfo & DMA_48BIT_MASK,
                                                  np->rx_buf_sz,
                                                  PCI_DMA_FROMDEVICE);
                                skb_put (skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
                                pci_dma_sync_single_for_cpu(np->pdev,
                                                            desc->fraginfo &
                                                            DMA_48BIT_MASK,
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                skb_copy_to_linear_data (skb,
                                                         np->rx_skbuff[entry]->data,
                                                         pkt_len);
                                skb_put (skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pdev,
                                                               desc->fraginfo &
                                                               DMA_48BIT_MASK,
                                                               np->rx_buf_sz,
                                                               PCI_DMA_FROMDEVICE);
                        }
                        skb->protocol = eth_type_trans (skb, dev);
#if 0
                        /* Checksum done by hw, but csum value unavailable. */
                        if (np->pdev->pci_rev_id >= 0x0c &&
                            !(frame_status & (TCPError | UDPError | IPError))) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        }
#endif
                        netif_rx (skb);
                        dev->last_rx = jiffies;
                }
                entry = (entry + 1) % RX_RING_SIZE;
        }
        spin_lock(&np->rx_lock);
        np->cur_rx = entry;
        /* Re-allocate skbuffs to fill the descriptor ring */
        entry = np->old_rx;
        while (entry != np->cur_rx) {
                struct sk_buff *skb;
                /* Dropped packets don't need to re-allocate */
                if (np->rx_skbuff[entry] == NULL) {
                        skb = dev_alloc_skb (np->rx_buf_sz);
                        if (skb == NULL) {
                                np->rx_ring[entry].fraginfo = 0;
                                printk (KERN_INFO
                                        "%s: receive_packet: "
                                        "Unable to re-allocate Rx skbuff.#%d\n",
                                        dev->name, entry);
                                break;
                        }
                        np->rx_skbuff[entry] = skb;
                        /* 16 byte align the IP header */
                        skb_reserve (skb, 2);
                        np->rx_ring[entry].fraginfo =
                                cpu_to_le64 (pci_map_single
                                             (np->pdev, skb->data, np->rx_buf_sz,
                                              PCI_DMA_FROMDEVICE));
                }
                np->rx_ring[entry].fraginfo |=
                        cpu_to_le64 (np->rx_buf_sz) << 48;
                np->rx_ring[entry].status = 0;
                entry = (entry + 1) % RX_RING_SIZE;
        }
        np->old_rx = entry;
        spin_unlock(&np->rx_lock);
        return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
        u16 macctrl;

        /* Link change event */
        if (int_status & LinkEvent) {
                if (mii_wait_link (dev, 10) == 0) {
                        printk (KERN_INFO "%s: Link up\n", dev->name);
                        if (np->phy_media)
                                mii_get_media_pcs (dev);
                        else
                                mii_get_media (dev);
                        if (np->speed == 1000)
                                np->tx_coalesce = tx_coalesce;
                        else
                                np->tx_coalesce = 1;
                        macctrl = 0;
                        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
                        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
                        macctrl |= (np->tx_flow) ?
                                TxFlowControlEnable : 0;
                        macctrl |= (np->rx_flow) ?
                                RxFlowControlEnable : 0;
                        writew(macctrl, ioaddr + MACCtrl);
                        np->link_status = 1;
                        netif_carrier_on(dev);
                } else {
                        printk (KERN_INFO "%s: Link off\n", dev->name);
                        np->link_status = 0;
                        netif_carrier_off(dev);
                }
        }

        /* UpdateStats statistics registers */
        if (int_status & UpdateStats) {
                get_stats (dev);
        }

        /* PCI Error, a catastrophic error related to the bus interface
           occurs, set GlobalReset and HostReset to reset. */
        if (int_status & HostError) {
                printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
                        dev->name, int_status);
                writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
                mdelay (500);
        }
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
#ifdef MEM_MAPPING
        int i;
#endif
        unsigned int stat_reg;

        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */

        np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
        np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
        np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
        np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);

        np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
        np->stats.collisions += readl (ioaddr + SingleColFrames)
                             + readl (ioaddr + MultiColFrames);

        /* detailed tx errors */
        stat_reg = readw (ioaddr + FramesAbortXSColls);
        np->stats.tx_aborted_errors += stat_reg;
        np->stats.tx_errors += stat_reg;

        stat_reg = readw (ioaddr + CarrierSenseErrors);
        np->stats.tx_carrier_errors += stat_reg;
        np->stats.tx_errors += stat_reg;

        /* Clear all other statistic register. */
        readl (ioaddr + McstOctetXmtOk);
        readw (ioaddr + BcstFramesXmtdOk);
        readl (ioaddr + McstFramesXmtdOk);
        readw (ioaddr + BcstFramesRcvdOk);
        readw (ioaddr + MacControlFramesRcvd);
        readw (ioaddr + FrameTooLongErrors);
        readw (ioaddr + InRangeLengthErrors);
        readw (ioaddr + FramesCheckSeqErrors);
        readw (ioaddr + FramesLostRxErrors);
        readl (ioaddr + McstOctetXmtOk);
        readl (ioaddr + BcstOctetXmtOk);
        readl (ioaddr + McstFramesXmtdOk);
        readl (ioaddr + FramesWDeferredXmt);
        readl (ioaddr + LateCollisions);
        readw (ioaddr + BcstFramesXmtdOk);
        readw (ioaddr + MacControlFramesXmtd);
        readw (ioaddr + FramesWEXDeferal);
#ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
                readl (ioaddr + i);
#endif
        readw (ioaddr + TxJumboFrames);
        readw (ioaddr + RxJumboFrames);
        readw (ioaddr + TCPCheckSumErrors);
        readw (ioaddr + UDPCheckSumErrors);
        readw (ioaddr + IPCheckSumErrors);
        return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
        long ioaddr = dev->base_addr;
#ifdef MEM_MAPPING
        int i;
#endif

        /* All statistics registers need to be acknowledged,
           else statistic overflow could cause problems */
        readl (ioaddr + FramesRcvOk);
        readl (ioaddr + FramesXmtOk);
        readl (ioaddr + OctetRcvOk);
        readl (ioaddr + OctetXmtOk);
        readl (ioaddr + McstFramesRcvdOk);
        readl (ioaddr + SingleColFrames);
        readl (ioaddr + MultiColFrames);
        readl (ioaddr + LateCollisions);
        /* detailed rx errors */
        readw (ioaddr + FrameTooLongErrors);
        readw (ioaddr + InRangeLengthErrors);
        readw (ioaddr + FramesCheckSeqErrors);
        readw (ioaddr + FramesLostRxErrors);

        /* detailed tx errors */
        readw (ioaddr + FramesAbortXSColls);
        readw (ioaddr + CarrierSenseErrors);

        /* Clear all other statistic register. */
        readl (ioaddr + McstOctetXmtOk);
        readw (ioaddr + BcstFramesXmtdOk);
        readl (ioaddr + McstFramesXmtdOk);
        readw (ioaddr + BcstFramesRcvdOk);
        readw (ioaddr + MacControlFramesRcvd);
        readl (ioaddr + McstOctetXmtOk);
        readl (ioaddr + BcstOctetXmtOk);
        readl (ioaddr + McstFramesXmtdOk);
        readl (ioaddr + FramesWDeferredXmt);
        readw (ioaddr + BcstFramesXmtdOk);
        readw (ioaddr + MacControlFramesXmtd);
        readw (ioaddr + FramesWEXDeferal);
#ifdef MEM_MAPPING
        for (i = 0x100; i <= 0x150; i += 4)
                readl (ioaddr + i);
#endif
        readw (ioaddr + TxJumboFrames);
        readw (ioaddr + RxJumboFrames);
        readw (ioaddr + TCPCheckSumErrors);
        readw (ioaddr + UDPCheckSumErrors);
        readw (ioaddr + IPCheckSumErrors);
        return 0;
}

int
change_mtu (struct net_device *dev, int new_mtu)
{
        struct netdev_private *np = netdev_priv(dev);
        int max = (np->jumbo) ? MAX_JUMBO : 1536;

        if ((new_mtu < 68) || (new_mtu > max)) {
                return -EINVAL;
        }

        dev->mtu = new_mtu;

        return 0;
}

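/*
 * Rx filter setup: promiscuous and all-multi modes bypass the hash filter;
 * otherwise each multicast address is run through ether_crc_le() and the
 * bit-reversed top 6 bits of the CRC select one of the 64 bits in the two
 * HashTable registers.
 */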
static void
set_multicast (struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        u32 hash_table[2];
        u16 rx_mode = 0;
        struct netdev_private *np = netdev_priv(dev);

        hash_table[0] = hash_table[1] = 0;
        /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
        hash_table[1] |= cpu_to_le32(0x02000000);

        if (dev->flags & IFF_PROMISC) {
                /* Receive all frames promiscuously. */
                rx_mode = ReceiveAllFrames;
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   (dev->mc_count > multicast_filter_limit)) {
                /* Receive broadcast and multicast frames */
                rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
        } else if (dev->mc_count > 0) {
                int i;
                struct dev_mc_list *mclist;
                /* Receive broadcast frames and multicast frames filtering
                   by Hashtable */
                rx_mode =
                        ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        int bit, index = 0;
                        int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
                        /* The inverted high significant 6 bits of CRC are
                           used as an index to hashtable */
                        for (bit = 0; bit < 6; bit++)
                                if (crc & (1 << (31 - bit)))
                                        index |= (1 << bit);
                        hash_table[index / 32] |= (1 << (index % 32));
                }
        } else {
                rx_mode = ReceiveBroadcast | ReceiveUnicast;
        }
        if (np->vlan) {
                /* ReceiveVLANMatch field in ReceiveMode */
                rx_mode |= ReceiveVLANMatch;
        }

        writel (hash_table[0], ioaddr + HashTable0);
        writel (hash_table[1], ioaddr + HashTable1);
        writew (rx_mode, ioaddr + ReceiveMode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct netdev_private *np = netdev_priv(dev);

        strcpy(info->driver, "dl2k");
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(np->pdev));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        if (np->phy_media) {
                /* fiber device */
                cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
                cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
                cmd->port = PORT_FIBRE;
                cmd->transceiver = XCVR_INTERNAL;
        } else {
                /* copper device */
                cmd->supported = SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
                        | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
                        SUPPORTED_Autoneg | SUPPORTED_MII;
                cmd->advertising = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
                        ADVERTISED_Autoneg | ADVERTISED_MII;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_INTERNAL;
        }
        if (np->link_status) {
                cmd->speed = np->speed;
                cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        if (np->an_enable)
                cmd->autoneg = AUTONEG_ENABLE;
        else
                cmd->autoneg = AUTONEG_DISABLE;

        cmd->phy_address = np->phy_addr;
        return 0;
}

static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        netif_carrier_off(dev);
        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (np->an_enable)
                        return 0;
                else {
                        np->an_enable = 1;
                        mii_set_media(dev);
                        return 0;
                }
        } else {
                np->an_enable = 0;
                if (np->speed == 1000) {
                        cmd->speed = SPEED_100;
                        cmd->duplex = DUPLEX_FULL;
                        printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
                }
                switch(cmd->speed + cmd->duplex) {
                case SPEED_10 + DUPLEX_HALF:
                        np->speed = 10;
                        np->full_duplex = 0;
                        break;
                case SPEED_10 + DUPLEX_FULL:
                        np->speed = 10;
                        np->full_duplex = 1;
                        break;
                case SPEED_100 + DUPLEX_HALF:
                        np->speed = 100;
                        np->full_duplex = 0;
                        break;
                case SPEED_100 + DUPLEX_FULL:
                        np->speed = 100;
                        np->full_duplex = 1;
                        break;
                case SPEED_1000 + DUPLEX_HALF:	/* not supported */
                case SPEED_1000 + DUPLEX_FULL:	/* not supported */
                default:
                        return -EINVAL;
                }
                mii_set_media(dev);
        }
        return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = rio_get_drvinfo,
        .get_settings = rio_get_settings,
        .set_settings = rio_set_settings,
        .get_link = rio_get_link,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
        int phy_addr;
        struct netdev_private *np = netdev_priv(dev);
        struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;

        struct netdev_desc *desc;
        int i;

        phy_addr = np->phy_addr;
        switch (cmd) {
        case SIOCDEVPRIVATE:
                break;

        case SIOCDEVPRIVATE + 1:
                miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
                break;
        case SIOCDEVPRIVATE + 2:
                mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
                break;
        case SIOCDEVPRIVATE + 3:
                break;
        case SIOCDEVPRIVATE + 4:
                break;
        case SIOCDEVPRIVATE + 5:
                netif_stop_queue (dev);
                break;
        case SIOCDEVPRIVATE + 6:
                netif_wake_queue (dev);
                break;
        case SIOCDEVPRIVATE + 7:
                printk
                    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
                     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
                     np->old_rx);
                break;
        case SIOCDEVPRIVATE + 8:
                printk("TX ring:\n");
                for (i = 0; i < TX_RING_SIZE; i++) {
                        desc = &np->tx_ring[i];
                        printk
                            ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
                             i,
                             (u32) (np->tx_ring_dma + i * sizeof (*desc)),
                             (u32) desc->next_desc,
                             (u32) desc->status, (u32) (desc->fraginfo >> 32),
                             (u32) desc->fraginfo);
                        printk ("\n");
                }
                printk ("\n");
                break;

        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read one EEPROM word. I/O instructions are used to read/write the eeprom
   to avoid failures on some machines. */
int
read_eeprom (long ioaddr, int eep_addr)
{
        int i = 1000;
        outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
        while (i-- > 0) {
                if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
                        return inw (ioaddr + EepromData);
                }
        }
        return 0;
}

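/*
 * The MII management interface is bit-banged through the PhyCtrl register:
 * the helpers below shift one bit out or in per MII_CLK edge, and mii_read()/
 * mii_write() build the standard clause-22 management frames on top of them.
 */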
enum phy_ctrl_bits {
        MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
        MII_DUPLEX = 0x08,
};

#define mii_delay() readb(ioaddr)

static void
mii_sendbit (struct net_device *dev, u32 data)
{
        long ioaddr = dev->base_addr + PhyCtrl;
        data = (data) ? MII_DATA1 : 0;
        data |= MII_WRITE;
        data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
        writeb (data, ioaddr);
        mii_delay ();
        writeb (data | MII_CLK, ioaddr);
        mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
        long ioaddr = dev->base_addr + PhyCtrl;
        u8 data;

        data = (readb (ioaddr) & 0xf8) | MII_READ;
        writeb (data, ioaddr);
        mii_delay ();
        writeb (data | MII_CLK, ioaddr);
        mii_delay ();
        return ((readb (ioaddr) >> 1) & 1);
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
        int i;
        for (i = len - 1; i >= 0; i--) {
                mii_sendbit (dev, data & (1 << i));
        }
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
        u32 cmd;
        int i;
        u32 retval = 0;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP = 0110'b for read operation */
        cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
        mii_send_bits (dev, cmd, 14);
        /* Turnaround */
        if (mii_getbit (dev))
                goto err_out;
        /* Read data */
        for (i = 0; i < 16; i++) {
                retval |= mii_getbit (dev);
                retval <<= 1;
        }
        /* End cycle */
        mii_getbit (dev);
        return (retval >> 1) & 0xffff;

err_out:
        return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
        u32 cmd;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
        cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
        mii_send_bits (dev, cmd, 32);
        /* End cycle */
        mii_getbit (dev);
        return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
        BMSR_t bmsr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        do {
                bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
                if (bmsr.bits.link_status)
                        return 0;
                mdelay (1);
        } while (--wait > 0);
        return -1;
}

static int
mii_get_media (struct net_device *dev)
{
        ANAR_t negotiate;
        BMSR_t bmsr;
        BMCR_t bmcr;
        MSCR_t mscr;
        MSSR_t mssr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
        if (np->an_enable) {
                if (!bmsr.bits.an_complete) {
                        /* Auto-Negotiation not completed */
                        return -1;
                }
                negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
                        mii_read (dev, phy_addr, MII_ANLPAR);
                mscr.image = mii_read (dev, phy_addr, MII_MSCR);
                mssr.image = mii_read (dev, phy_addr, MII_MSSR);
                if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
                        np->speed = 1000;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
                } else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
                        np->speed = 1000;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
                } else if (negotiate.bits.media_100BX_FD) {
                        np->speed = 100;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
                } else if (negotiate.bits.media_100BX_HD) {
                        np->speed = 100;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
                } else if (negotiate.bits.media_10BT_FD) {
                        np->speed = 10;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
                } else if (negotiate.bits.media_10BT_HD) {
                        np->speed = 10;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
                }
                if (negotiate.bits.pause) {
                        np->tx_flow &= 1;
                        np->rx_flow &= 1;
                } else if (negotiate.bits.asymmetric) {
                        np->tx_flow = 0;
                        np->rx_flow &= 1;
                }
                /* else tx_flow, rx_flow = user select */
        } else {
                bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
                if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
                        printk (KERN_INFO "Operating at 100 Mbps, ");
                } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
                        printk (KERN_INFO "Operating at 10 Mbps, ");
                } else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
                        printk (KERN_INFO "Operating at 1000 Mbps, ");
                }
                if (bmcr.bits.duplex_mode) {
                        printk ("Full duplex\n");
                } else {
                        printk ("Half duplex\n");
                }
        }
        if (np->tx_flow)
                printk(KERN_INFO "Enable Tx Flow Control\n");
        else
                printk(KERN_INFO "Disable Tx Flow Control\n");
        if (np->rx_flow)
                printk(KERN_INFO "Enable Rx Flow Control\n");
        else
                printk(KERN_INFO "Disable Rx Flow Control\n");
        return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	PHY_SCR_t pscr;
	BMCR_t bmcr;
	BMSR_t bmsr;
	ANAR_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;
	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
		anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
		anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
		anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
		anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);
		/* Enable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 3;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 0;
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
		/* 2) PHY Reset */
		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		/* 3) Power Down */
		bmcr.image = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay (100);	/* wait a certain time */
		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);
		/* 5) Set media and Power Up */
		bmcr.image = 0;
		bmcr.bits.power_down = 1;
		if (np->speed == 100) {
			bmcr.bits.speed100 = 1;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			bmcr.bits.speed100 = 0;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk ("Full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk ("Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
		mscr.bits.cfg_enable = 1;
		mscr.bits.cfg_value = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
	}
	return 0;
}
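/* Report the negotiated link parameters of the PCS interface (1000 Mbps
 * only).  With auto-negotiation, AND the local and link-partner ability
 * registers to derive duplex and pause settings; returns -1 while
 * negotiation has not completed.  Without auto-negotiation, duplex is read
 * back from the PCS BMCR.
 */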
static int
mii_get_media_pcs (struct net_device *dev)
{
	ANAR_PCS_t negotiate;
	BMSR_t bmsr;
	BMCR_t bmcr;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;
	bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!bmsr.bits.an_complete) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate.bits.full_duplex) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate.bits.pause) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate.bits.asymmetric) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr.bits.duplex_mode) {
			printk ("Full duplex\n");
		} else {
			printk ("Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");
	return 0;
}
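/* Configure the PCS interface.  With auto-negotiation, advertise the
 * 1000 Mbps half/full-duplex abilities found in the extended status
 * register plus pause, then soft-reset the PHY and restart negotiation.
 * Otherwise reset the PCS, force the duplex bit and advertise nothing.
 */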
static int
mii_set_media_pcs (struct net_device *dev)
{
	BMCR_t bmcr;
	ESR_t esr;
	ANAR_PCS_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;
	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr.image = mii_read (dev, phy_addr, PCS_ESR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.half_duplex =
			esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
		anar.bits.full_duplex =
			esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);
		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr.image = 0;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
		bmcr.image = 0;
		bmcr.bits.an_enable = 0;
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);
	}
	return 0;
}
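/* Stop the interface: mask interrupts, halt the Tx/Rx/statistics engines,
 * release the IRQ and delete the pending timer, then unmap and free every
 * socket buffer still held in the Rx and Tx descriptor rings.
 */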
static int
rio_close (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;
	netif_stop_queue (dev);
	/* Disable interrupts */
	writew (0, ioaddr + IntEnable);
	/* Stop Tx and Rx logic */
	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
	synchronize_irq (dev->irq);
	free_irq (dev->irq, dev);
	del_timer_sync (&np->timer);
	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
				np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
				skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
				np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
		}
	}
	return 0;
}
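/* PCI removal callback: unregister the net device, free the DMA-coherent
 * descriptor rings, drop the MMIO mapping when MEM_MAPPING is defined, and
 * release and disable the PCI device.
 */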
static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		iounmap ((char *) (dev->base_addr));
#endif
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
	pci_set_drvdata (pdev, NULL);
}
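/* Driver registration glue: rio_pci_tbl lists the supported devices and
 * rio_probe1/rio_remove1 are the probe/remove entry points.
 */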
static struct pci_driver rio_driver = {
	.name = "dl2k",
	.id_table = rio_pci_tbl,
	.probe = rio_probe1,
	.remove = __devexit_p(rio_remove1),
};

static int __init
rio_init (void)
{
	return pci_register_driver(&rio_driver);
}

static void __exit
rio_exit (void)
{
	pci_unregister_driver (&rio_driver);
}

module_init (rio_init);
module_exit (rio_exit);
/*
   Compile command:

   gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

   Read Documentation/networking/dl2k.txt for details.
*/