au1000_eth.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267
  1. /*
  2. *
  3. * Alchemy Au1x00 ethernet driver
  4. *
  5. * Copyright 2001,2002,2003 MontaVista Software Inc.
  6. * Copyright 2002 TimeSys Corp.
  7. * Added ethtool/mii-tool support,
  8. * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
  9. * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
  10. * or riemer@riemer-nt.de: fixed the link beat detection with
  11. * ioctls (SIOCGMIIPHY)
  12. * Author: MontaVista Software, Inc.
  13. * ppopov@mvista.com or source@mvista.com
  14. *
  15. * ########################################################################
  16. *
  17. * This program is free software; you can distribute it and/or modify it
  18. * under the terms of the GNU General Public License (Version 2) as
  19. * published by the Free Software Foundation.
  20. *
  21. * This program is distributed in the hope it will be useful, but WITHOUT
  22. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  23. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  24. * for more details.
  25. *
  26. * You should have received a copy of the GNU General Public License along
  27. * with this program; if not, write to the Free Software Foundation, Inc.,
  28. * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  29. *
  30. * ########################################################################
  31. *
  32. *
  33. */
  34. #include <linux/module.h>
  35. #include <linux/kernel.h>
  36. #include <linux/sched.h>
  37. #include <linux/string.h>
  38. #include <linux/timer.h>
  39. #include <linux/errno.h>
  40. #include <linux/in.h>
  41. #include <linux/ioport.h>
  42. #include <linux/bitops.h>
  43. #include <linux/slab.h>
  44. #include <linux/interrupt.h>
  45. #include <linux/pci.h>
  46. #include <linux/init.h>
  47. #include <linux/netdevice.h>
  48. #include <linux/etherdevice.h>
  49. #include <linux/ethtool.h>
  50. #include <linux/mii.h>
  51. #include <linux/skbuff.h>
  52. #include <linux/delay.h>
  53. #include <asm/mipsregs.h>
  54. #include <asm/irq.h>
  55. #include <asm/io.h>
  56. #include <asm/processor.h>
  57. #include <asm/mach-au1x00/au1000.h>
  58. #include <asm/cpu.h>
  59. #include "au1000_eth.h"
  60. #ifdef AU1000_ETH_DEBUG
  61. static int au1000_debug = 5;
  62. #else
  63. static int au1000_debug = 3;
  64. #endif
  65. #define DRV_NAME "au1000eth"
  66. #define DRV_VERSION "1.5"
  67. #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
  68. #define DRV_DESC "Au1xxx on-chip Ethernet driver"
  69. MODULE_AUTHOR(DRV_AUTHOR);
  70. MODULE_DESCRIPTION(DRV_DESC);
  71. MODULE_LICENSE("GPL");
  72. // prototypes
  73. static void hard_stop(struct net_device *);
  74. static void enable_rx_tx(struct net_device *dev);
  75. static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
  76. static int au1000_init(struct net_device *);
  77. static int au1000_open(struct net_device *);
  78. static int au1000_close(struct net_device *);
  79. static int au1000_tx(struct sk_buff *, struct net_device *);
  80. static int au1000_rx(struct net_device *);
  81. static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
  82. static void au1000_tx_timeout(struct net_device *);
  83. static int au1000_set_config(struct net_device *dev, struct ifmap *map);
  84. static void set_rx_mode(struct net_device *);
  85. static struct net_device_stats *au1000_get_stats(struct net_device *);
  86. static inline void update_tx_stats(struct net_device *, u32, u32);
  87. static inline void update_rx_stats(struct net_device *, u32);
  88. static void au1000_timer(unsigned long);
  89. static int au1000_ioctl(struct net_device *, struct ifreq *, int);
  90. static int mdio_read(struct net_device *, int, int);
  91. static void mdio_write(struct net_device *, int, int, u16);
  92. static void dump_mii(struct net_device *dev, int phy_id);
  93. // externs
  94. extern void ack_rise_edge_irq(unsigned int);
  95. extern int get_ethernet_addr(char *ethernet_addr);
  96. extern void str2eaddr(unsigned char *ea, unsigned char *str);
  97. extern char * __init prom_getcmdline(void);
  98. /*
  99. * Theory of operation
  100. *
  101. * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
  102. * There are four receive and four transmit descriptors. These
  103. * descriptors are not in memory; rather, they are just a set of
  104. * hardware registers.
  105. *
  106. * Since the Au1000 has a coherent data cache, the receive and
  107. * transmit buffers are allocated from the KSEG0 segment. The
  108. * hardware registers, however, are still mapped at KSEG1 to
  109. * make sure there's no out-of-order writes, and that all writes
  110. * complete immediately.
  111. */
  112. /* These addresses are only used if yamon doesn't tell us what
  113. * the mac address is, and the mac address is not passed on the
  114. * command line.
  115. */
/* Fallback station address, used only when neither YAMON nor the kernel
 * command line supplies one (see comment above).  The low byte is
 * presumably adjusted per interface — TODO confirm against the probe code. */
static unsigned char au1000_mac_addr[6] __devinitdata = {
	0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
};
  119. #define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
  120. #define RUN_AT(x) (jiffies + (x))
  121. // For reading/writing 32-bit words from/to DMA memory
  122. #define cpu_to_dma32 cpu_to_be32
  123. #define dma32_to_cpu be32_to_cpu
  124. struct au1000_private *au_macs[NUM_ETH_INTERFACES];
  125. /* FIXME
  126. * All of the PHY code really should be detached from the MAC
  127. * code.
  128. */
  129. /* Default advertise */
  130. #define GENMII_DEFAULT_ADVERTISE \
  131. ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
  132. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
  133. ADVERTISED_Autoneg
  134. #define GENMII_DEFAULT_FEATURES \
  135. SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
  136. SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
  137. SUPPORTED_Autoneg
/* Human-readable names for link types; indexed by the IF_PORT_* value
 * that the *_status handlers store in dev->if_port. */
static char *phy_link[] =
{	"unknown",
	"10Base2", "10BaseT",
	"AUI",
	"100BaseT", "100BaseTX", "100BaseFX"
};
/*
 * Configure a Broadcom BCM5201 PHY: advertise all 10/100 half/full
 * modes, then restart auto-negotiation.  Always returns 0.
 */
int bcm_5201_init(struct net_device *dev, int phy_addr)
{
	s16 data;

	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}
  163. int bcm_5201_reset(struct net_device *dev, int phy_addr)
  164. {
  165. s16 mii_control, timeout;
  166. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  167. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  168. mdelay(1);
  169. for (timeout = 100; timeout > 0; --timeout) {
  170. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  171. if ((mii_control & MII_CNTL_RESET) == 0)
  172. break;
  173. mdelay(1);
  174. }
  175. if (mii_control & MII_CNTL_RESET) {
  176. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  177. return -1;
  178. }
  179. return 0;
  180. }
  181. int
  182. bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  183. {
  184. u16 mii_data;
  185. struct au1000_private *aup;
  186. if (!dev) {
  187. printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
  188. return -1;
  189. }
  190. aup = (struct au1000_private *) dev->priv;
  191. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  192. if (mii_data & MII_STAT_LINK) {
  193. *link = 1;
  194. mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
  195. if (mii_data & MII_AUX_100) {
  196. if (mii_data & MII_AUX_FDX) {
  197. *speed = IF_PORT_100BASEFX;
  198. dev->if_port = IF_PORT_100BASEFX;
  199. }
  200. else {
  201. *speed = IF_PORT_100BASETX;
  202. dev->if_port = IF_PORT_100BASETX;
  203. }
  204. }
  205. else {
  206. *speed = IF_PORT_10BASET;
  207. dev->if_port = IF_PORT_10BASET;
  208. }
  209. }
  210. else {
  211. *link = 0;
  212. *speed = 0;
  213. dev->if_port = IF_PORT_UNKNOWN;
  214. }
  215. return 0;
  216. }
/*
 * Configure an LSI 80227 PHY: restart auto-negotiation with 100 Mbit
 * capability and program the board-specific LED register.
 * Always returns 0.
 */
int lsi_80227_init(struct net_device *dev, int phy_addr)
{
	if (au1000_debug > 4)
		printk("lsi_80227_init\n");

	/* restart auto-negotiation */
	mdio_write(dev, phy_addr, MII_CONTROL,
	           MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
	mdelay(1);

	/* set up LEDs to correct display */
#ifdef CONFIG_MIPS_MTX1
	/* MTX-1 boards wire the LEDs differently — register 17 is a
	 * vendor-specific LED config register on this PHY. */
	mdio_write(dev, phy_addr, 17, 0xff80);
#else
	mdio_write(dev, phy_addr, 17, 0xffc0);
#endif

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}
/*
 * Soft-reset an LSI 80227 PHY and poll (1 ms steps, ~100 ms max) for
 * the self-clearing reset bit.  Returns 0 on success, -1 on timeout.
 */
int lsi_80227_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4) {
		printk("lsi_80227_reset\n");
		dump_mii(dev, phy_addr);
	}

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}
/*
 * Report LSI 80227 link state.  Sets *link (0/1), *speed (IF_PORT_*
 * value, 0 when down) and dev->if_port.  Returns 0, or -1 if dev is
 * NULL.  Speed/duplex come from the vendor status register
 * MII_LSI_PHY_STAT.
 * NOTE(review): uses aup->phy_addr, not the phy_addr argument — confirm.
 */
int
lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
		if (mii_data & MII_LSI_PHY_STAT_SPD) {
			if (mii_data & MII_LSI_PHY_STAT_FDX) {
				/* 100 Mbit, full duplex */
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				/* 100 Mbit, half duplex */
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			/* 10 Mbit */
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		/* no carrier */
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}
/* AMD 79C901 HomePNA PHY: no device-specific setup is implemented;
 * just log and report success. */
int am79c901_init(struct net_device *dev, int phy_addr)
{
	printk("am79c901_init\n");
	return 0;
}
/* AMD 79C901 HomePNA PHY: reset is not implemented; just log and
 * report success. */
int am79c901_reset(struct net_device *dev, int phy_addr)
{
	printk("am79c901_reset\n");
	return 0;
}
  303. int
  304. am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  305. {
  306. return 0;
  307. }
  308. int am79c874_init(struct net_device *dev, int phy_addr)
  309. {
  310. s16 data;
  311. /* 79c874 has quit resembled bit assignments to BCM5201 */
  312. if (au1000_debug > 4)
  313. printk("am79c847_init\n");
  314. /* Stop auto-negotiation */
  315. data = mdio_read(dev, phy_addr, MII_CONTROL);
  316. mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
  317. /* Set advertisement to 10/100 and Half/Full duplex
  318. * (full capabilities) */
  319. data = mdio_read(dev, phy_addr, MII_ANADV);
  320. data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
  321. mdio_write(dev, phy_addr, MII_ANADV, data);
  322. /* Restart auto-negotiation */
  323. data = mdio_read(dev, phy_addr, MII_CONTROL);
  324. data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
  325. mdio_write(dev, phy_addr, MII_CONTROL, data);
  326. if (au1000_debug > 4) dump_mii(dev, phy_addr);
  327. return 0;
  328. }
/*
 * Soft-reset an AMD 79C874 PHY and poll (1 ms steps, ~100 ms max) for
 * the self-clearing reset bit.  Returns 0 on success, -1 on timeout.
 */
int am79c874_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4)
		printk("am79c874_reset\n");

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}
/*
 * Report AMD 79C874 link state.  Sets *link (0/1), *speed (IF_PORT_*
 * value, 0 when down) and dev->if_port.  Returns 0, or -1 if dev is
 * NULL.  Speed/duplex come from the AMD vendor status register.
 * NOTE(review): uses aup->phy_addr, not the phy_addr argument — confirm.
 */
int
am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "am79c874_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
		if (mii_data & MII_AMD_PHY_STAT_SPD) {
			if (mii_data & MII_AMD_PHY_STAT_FDX) {
				/* 100 Mbit, full duplex */
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				/* 100 Mbit, half duplex */
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			/* 10 Mbit */
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		/* no carrier */
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}
/*
 * Configure an Intel LXT971A PHY: restart auto-negotiation advertising
 * 100 Mbit full duplex, and program the vendor LED configuration
 * register (reg 20).  Always returns 0.
 */
int lxt971a_init(struct net_device *dev, int phy_addr)
{
	if (au1000_debug > 4)
		printk("lxt971a_init\n");

	/* restart auto-negotiation */
	mdio_write(dev, phy_addr, MII_CONTROL,
	           MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);

	/* set up LEDs to correct display */
	mdio_write(dev, phy_addr, 20, 0x0422);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}
/*
 * Soft-reset an Intel LXT971A PHY and poll (1 ms steps, ~100 ms max)
 * for the self-clearing reset bit.  Returns 0 on success, -1 on
 * timeout.
 */
int lxt971a_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4) {
		printk("lxt971a_reset\n");
		dump_mii(dev, phy_addr);
	}

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}
/*
 * Report Intel LXT971A link state.  Sets *link (0/1), *speed
 * (IF_PORT_* value, 0 when down) and dev->if_port.  Returns 0, or -1
 * if dev is NULL.  Speed/duplex come from the Intel vendor status reg.
 * NOTE(review): uses aup->phy_addr, not the phy_addr argument — confirm.
 */
int
lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "lxt971a_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
		if (mii_data & MII_INTEL_PHY_STAT_SPD) {
			if (mii_data & MII_INTEL_PHY_STAT_FDX) {
				/* 100 Mbit, full duplex */
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				/* 100 Mbit, half duplex */
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			/* 10 Mbit */
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		/* no carrier */
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}
/*
 * Configure a Kendin KS8995M PHY: advertise all 10/100 half/full
 * modes, then restart auto-negotiation.  Always returns 0.
 */
int ks8995m_init(struct net_device *dev, int phy_addr)
{
	s16 data;

	// printk("ks8995m_init\n");
	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}
/*
 * Soft-reset a Kendin KS8995M PHY and poll (1 ms steps, ~100 ms max)
 * for the self-clearing reset bit.  Returns 0 on success, -1 on
 * timeout.
 */
int ks8995m_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	// printk("ks8995m_reset\n");
	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}
/*
 * Report Kendin KS8995M link state.  Sets *link (0/1), *speed
 * (IF_PORT_* value, 0 when down) and dev->if_port.  Returns 0, or -1
 * if dev is NULL.  Decodes the same auxiliary control register layout
 * as the BCM5201 handler.
 * NOTE(review): uses aup->phy_addr, not the phy_addr argument — confirm.
 */
int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "ks8995m_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
		if (mii_data & MII_AUX_100) {
			if (mii_data & MII_AUX_FDX) {
				/* 100 Mbit, full duplex */
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				/* 100 Mbit, half duplex */
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			/* 10 Mbit */
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		/* no carrier */
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}
/*
 * Configure an SMSC LAN83C185 PHY: advertise all 10/100 half/full
 * modes, then restart auto-negotiation.  Always returns 0.
 */
int
smsc_83C185_init (struct net_device *dev, int phy_addr)
{
	s16 data;

	if (au1000_debug > 4)
		printk("smsc_83C185_init\n");

	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}
/*
 * Soft-reset an SMSC LAN83C185 PHY and poll (1 ms steps, ~100 ms max)
 * for the self-clearing reset bit.  Returns 0 on success, -1 on
 * timeout.
 */
int
smsc_83C185_reset (struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4)
		printk("smsc_83C185_reset\n");

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}
/*
 * Report SMSC LAN83C185 link state.  Sets *link (0/1), *speed
 * (IF_PORT_* value, 0 when down) and dev->if_port.  Returns 0, or -1
 * if dev is NULL.  Speed/duplex are decoded from vendor register 0x1f:
 * bit 3 presumably selects 100 Mbit and bit 4 full duplex — TODO
 * confirm against the LAN83C185 datasheet (no named macros exist for
 * these bits).
 * NOTE(review): uses aup->phy_addr, not the phy_addr argument — confirm.
 */
int
smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
		if (mii_data & (1<<3)) {
			if (mii_data & (1<<4)) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		/* no carrier */
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}
#ifdef CONFIG_MIPS_BOSPORUS
/* Bosporus boards have no accessible PHY: these stubs satisfy the
 * phy_ops interface and always claim a 100 Mbit full-duplex link. */
int stub_init(struct net_device *dev, int phy_addr)
{
	//printk("PHY stub_init\n");
	return 0;
}
int stub_reset(struct net_device *dev, int phy_addr)
{
	//printk("PHY stub_reset\n");
	return 0;
}
int
stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	//printk("PHY stub_status\n");
	*link = 1;
	/* hmmm, revisit */
	*speed = IF_PORT_100BASEFX;
	dev->if_port = IF_PORT_100BASEFX;
	return 0;
}
#endif
/* Per-chip phy_ops vtables referenced by mii_chip_table below.
 * Positional initializers — order must match struct phy_ops as
 * declared in au1000_eth.h (init, reset, status). */
struct phy_ops bcm_5201_ops = {
	bcm_5201_init,
	bcm_5201_reset,
	bcm_5201_status,
};
struct phy_ops am79c874_ops = {
	am79c874_init,
	am79c874_reset,
	am79c874_status,
};
struct phy_ops am79c901_ops = {
	am79c901_init,
	am79c901_reset,
	am79c901_status,
};
struct phy_ops lsi_80227_ops = {
	lsi_80227_init,
	lsi_80227_reset,
	lsi_80227_status,
};
struct phy_ops lxt971a_ops = {
	lxt971a_init,
	lxt971a_reset,
	lxt971a_status,
};
struct phy_ops ks8995m_ops = {
	ks8995m_init,
	ks8995m_reset,
	ks8995m_status,
};
struct phy_ops smsc_83C185_ops = {
	smsc_83C185_init,
	smsc_83C185_reset,
	smsc_83C185_status,
};
#ifdef CONFIG_MIPS_BOSPORUS
struct phy_ops stub_ops = {
	stub_init,
	stub_reset,
	stub_status,
};
#endif
/* Table of supported PHYs, matched against a PHY's id registers
 * (MII PHY ID high/low words) during probe.  dual_phy marks parts
 * where two PHYs are managed through a single MAC (see mdio_read).
 * The table is terminated by an entry with a NULL name. */
static struct mii_chip_info {
	const char * name;
	u16 phy_id0;
	u16 phy_id1;
	struct phy_ops *phy_ops;
	int dual_phy;	/* nonzero: both PHYs controlled through mac0 */
} mii_chip_table[] = {
	{"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
	{"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
	{"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
	{"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
	{"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
	{"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
	{"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
	{"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
	{"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
#ifdef CONFIG_MIPS_BOSPORUS
	/* dual_phy implicitly zero for the stub entry */
	{"Stub", 0x1234, 0x5678, &stub_ops },
#endif
	{0,},	/* terminator */
};
/*
 * Read an MII management register via the MAC's MDIO interface.
 *
 * Waits (up to 20 ms) for the MII unit to go idle, issues the read
 * command, then waits again for completion before fetching the data
 * register.  Returns the 16-bit register value, or -1 on busy timeout.
 * NOTE(review): -1 is indistinguishable from a legitimate 0xffff read
 * for callers that truncate the result to 16 bits.
 */
static int mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *mii_control_reg;
	volatile u32 *mii_data_reg;
	u32 timedout = 20;
	u32 mii_control;
#ifdef CONFIG_BCM5222_DUAL_PHY
	/* First time we probe, it's for the mac0 phy.
	 * Since we haven't determined yet that we have a dual phy,
	 * aup->mii->mii_control_reg won't be setup and we'll
	 * default to the else statement.
	 * By the time we probe for the mac1 phy, the mii_control_reg
	 * will be setup to be the address of the mac0 phy control since
	 * both phys are controlled through mac0.
	 */
	if (aup->mii && aup->mii->mii_control_reg) {
		mii_control_reg = aup->mii->mii_control_reg;
		mii_data_reg = aup->mii->mii_data_reg;
	}
	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
		/* assume both phys are controlled through mac0 */
		mii_control_reg = au_macs[0]->mii->mii_control_reg;
		mii_data_reg = au_macs[0]->mii->mii_data_reg;
	}
	else
#endif
	{
		/* default control and data reg addresses */
		mii_control_reg = &aup->mac->mii_control;
		mii_data_reg = &aup->mac->mii_data;
	}

	/* Wait for any in-flight MII transaction to finish. */
	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
					dev->name);
			return -1;
		}
	}

	/* Issue the read: select register and PHY address, set READ. */
	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
	*mii_control_reg = mii_control;

	/* Wait for the read to complete, then fetch the data word. */
	timedout = 20;
	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
					dev->name);
			return -1;
		}
	}
	return (int)*mii_data_reg;
}
/*
 * Write one 16-bit value to MII register 'reg' of the PHY at phy_id.
 * Silently gives up after a ~20 ms MDIO busy timeout (no error return);
 * does not wait for its own write to complete -- the busy-wait at the
 * top of the next transaction covers that.
 */
static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *mii_control_reg;
	volatile u32 *mii_data_reg;
	u32 timedout = 20;	/* ~20 ms worst case at 1 ms poll interval */
	u32 mii_control;

#ifdef CONFIG_BCM5222_DUAL_PHY
	/* on a dual-PHY part both PHYs are controlled through MAC0,
	 * so prefer the redirected register addresses when set up */
	if (aup->mii && aup->mii->mii_control_reg) {
		mii_control_reg = aup->mii->mii_control_reg;
		mii_data_reg = aup->mii->mii_data_reg;
	}
	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
		/* assume both phys are controlled through mac0 */
		mii_control_reg = au_macs[0]->mii->mii_control_reg;
		mii_data_reg = au_macs[0]->mii->mii_data_reg;
	}
	else
#endif
	{
		/* default control and data reg addresses */
		mii_control_reg = &aup->mac->mii_control;
		mii_data_reg = &aup->mac->mii_data;
	}

	/* wait for any in-flight MDIO transaction to finish */
	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
					dev->name);
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;

	/* data first, then the control word kicks off the write */
	*mii_data_reg = value;
	*mii_control_reg = mii_control;
}
  784. static void dump_mii(struct net_device *dev, int phy_id)
  785. {
  786. int i, val;
  787. for (i = 0; i < 7; i++) {
  788. if ((val = mdio_read(dev, phy_id, i)) >= 0)
  789. printk("%s: MII Reg %d=%x\n", dev->name, i, val);
  790. }
  791. for (i = 16; i < 25; i++) {
  792. if ((val = mdio_read(dev, phy_id, i)) >= 0)
  793. printk("%s: MII Reg %d=%x\n", dev->name, i, val);
  794. }
  795. }
  796. static int mii_probe (struct net_device * dev)
  797. {
  798. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  799. int phy_addr;
  800. #ifdef CONFIG_MIPS_BOSPORUS
  801. int phy_found=0;
  802. #endif
  803. /* search for total of 32 possible mii phy addresses */
  804. for (phy_addr = 0; phy_addr < 32; phy_addr++) {
  805. u16 mii_status;
  806. u16 phy_id0, phy_id1;
  807. int i;
  808. #ifdef CONFIG_BCM5222_DUAL_PHY
  809. /* Mask the already found phy, try next one */
  810. if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
  811. if (au_macs[0]->phy_addr == phy_addr)
  812. continue;
  813. }
  814. #endif
  815. mii_status = mdio_read(dev, phy_addr, MII_STATUS);
  816. if (mii_status == 0xffff || mii_status == 0x0000)
  817. /* the mii is not accessable, try next one */
  818. continue;
  819. phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
  820. phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
  821. /* search our mii table for the current mii */
  822. for (i = 0; mii_chip_table[i].phy_id1; i++) {
  823. if (phy_id0 == mii_chip_table[i].phy_id0 &&
  824. phy_id1 == mii_chip_table[i].phy_id1) {
  825. struct mii_phy * mii_phy = aup->mii;
  826. printk(KERN_INFO "%s: %s at phy address %d\n",
  827. dev->name, mii_chip_table[i].name,
  828. phy_addr);
  829. #ifdef CONFIG_MIPS_BOSPORUS
  830. phy_found = 1;
  831. #endif
  832. mii_phy->chip_info = mii_chip_table+i;
  833. aup->phy_addr = phy_addr;
  834. aup->want_autoneg = 1;
  835. aup->phy_ops = mii_chip_table[i].phy_ops;
  836. aup->phy_ops->phy_init(dev,phy_addr);
  837. // Check for dual-phy and then store required
  838. // values and set indicators. We need to do
  839. // this now since mdio_{read,write} need the
  840. // control and data register addresses.
  841. #ifdef CONFIG_BCM5222_DUAL_PHY
  842. if ( mii_chip_table[i].dual_phy) {
  843. /* assume both phys are controlled
  844. * through MAC0. Board specific? */
  845. /* sanity check */
  846. if (!au_macs[0] || !au_macs[0]->mii)
  847. return -1;
  848. aup->mii->mii_control_reg = (u32 *)
  849. &au_macs[0]->mac->mii_control;
  850. aup->mii->mii_data_reg = (u32 *)
  851. &au_macs[0]->mac->mii_data;
  852. }
  853. #endif
  854. goto found;
  855. }
  856. }
  857. }
  858. found:
  859. #ifdef CONFIG_MIPS_BOSPORUS
  860. /* This is a workaround for the Micrel/Kendin 5 port switch
  861. The second MAC doesn't see a PHY connected... so we need to
  862. trick it into thinking we have one.
  863. If this kernel is run on another Au1500 development board
  864. the stub will be found as well as the actual PHY. However,
  865. the last found PHY will be used... usually at Addr 31 (Db1500).
  866. */
  867. if ( (!phy_found) )
  868. {
  869. u16 phy_id0, phy_id1;
  870. int i;
  871. phy_id0 = 0x1234;
  872. phy_id1 = 0x5678;
  873. /* search our mii table for the current mii */
  874. for (i = 0; mii_chip_table[i].phy_id1; i++) {
  875. if (phy_id0 == mii_chip_table[i].phy_id0 &&
  876. phy_id1 == mii_chip_table[i].phy_id1) {
  877. struct mii_phy * mii_phy;
  878. printk(KERN_INFO "%s: %s at phy address %d\n",
  879. dev->name, mii_chip_table[i].name,
  880. phy_addr);
  881. mii_phy = kmalloc(sizeof(struct mii_phy),
  882. GFP_KERNEL);
  883. if (mii_phy) {
  884. mii_phy->chip_info = mii_chip_table+i;
  885. aup->phy_addr = phy_addr;
  886. mii_phy->next = aup->mii;
  887. aup->phy_ops =
  888. mii_chip_table[i].phy_ops;
  889. aup->mii = mii_phy;
  890. aup->phy_ops->phy_init(dev,phy_addr);
  891. } else {
  892. printk(KERN_ERR "%s: out of memory\n",
  893. dev->name);
  894. return -1;
  895. }
  896. mii_phy->chip_info = mii_chip_table+i;
  897. aup->phy_addr = phy_addr;
  898. aup->phy_ops = mii_chip_table[i].phy_ops;
  899. aup->phy_ops->phy_init(dev,phy_addr);
  900. break;
  901. }
  902. }
  903. }
  904. if (aup->mac_id == 0) {
  905. /* the Bosporus phy responds to addresses 0-5 but
  906. * 5 is the correct one.
  907. */
  908. aup->phy_addr = 5;
  909. }
  910. #endif
  911. if (aup->mii->chip_info == NULL) {
  912. printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
  913. dev->name);
  914. return -1;
  915. }
  916. printk(KERN_INFO "%s: Using %s as default\n",
  917. dev->name, aup->mii->chip_info->name);
  918. return 0;
  919. }
/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and DMA address of a buffer suitable for both
 * receive and transmit operations.
 */
  925. static db_dest_t *GetFreeDB(struct au1000_private *aup)
  926. {
  927. db_dest_t *pDB;
  928. pDB = aup->pDBfree;
  929. if (pDB) {
  930. aup->pDBfree = pDB->pnext;
  931. }
  932. return pDB;
  933. }
/*
 * Return a buffer descriptor to the free list.
 *
 * NOTE(review): this links the OLD head to the new node
 * (pDBfree->pnext = pDB) and leaves pDB->pnext untouched, instead of
 * the conventional "pDB->pnext = old head" push.  After a release, a
 * GetFreeDB() that pops pDB will follow pDB's stale pnext.  Looks
 * wrong -- confirm the intended free-list discipline before relying on
 * repeated release/alloc cycles.
 */
void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
	db_dest_t *pDBfree = aup->pDBfree;

	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}
/* Set the receive and transmit enable bits in the MAC control register. */
static void enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);

	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	au_sync_delay(10);
}
/* Clear the receive and transmit enable bits in the MAC control register. */
static void hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: hard stop\n", dev->name);

	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	au_sync_delay(10);
}
  957. static void reset_mac(struct net_device *dev)
  958. {
  959. int i;
  960. u32 flags;
  961. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  962. if (au1000_debug > 4)
  963. printk(KERN_INFO "%s: reset mac, aup %x\n",
  964. dev->name, (unsigned)aup);
  965. spin_lock_irqsave(&aup->lock, flags);
  966. if (aup->timer.function == &au1000_timer) {/* check if timer initted */
  967. del_timer(&aup->timer);
  968. }
  969. hard_stop(dev);
  970. #ifdef CONFIG_BCM5222_DUAL_PHY
  971. if (aup->mac_id != 0) {
  972. #endif
  973. /* If BCM5222, we can't leave MAC0 in reset because then
  974. * we can't access the dual phy for ETH1 */
  975. *aup->enable = MAC_EN_CLOCK_ENABLE;
  976. au_sync_delay(2);
  977. *aup->enable = 0;
  978. au_sync_delay(2);
  979. #ifdef CONFIG_BCM5222_DUAL_PHY
  980. }
  981. #endif
  982. aup->tx_full = 0;
  983. for (i = 0; i < NUM_RX_DMA; i++) {
  984. /* reset control bits */
  985. aup->rx_dma_ring[i]->buff_stat &= ~0xf;
  986. }
  987. for (i = 0; i < NUM_TX_DMA; i++) {
  988. /* reset control bits */
  989. aup->tx_dma_ring[i]->buff_stat &= ~0xf;
  990. }
  991. spin_unlock_irqrestore(&aup->lock, flags);
  992. }
  993. /*
  994. * Setup the receive and transmit "rings". These pointers are the addresses
  995. * of the rx and tx MAC DMA registers so they are fixed by the hardware --
  996. * these are not descriptors sitting in memory.
  997. */
  998. static void
  999. setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
  1000. {
  1001. int i;
  1002. for (i = 0; i < NUM_RX_DMA; i++) {
  1003. aup->rx_dma_ring[i] =
  1004. (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
  1005. }
  1006. for (i = 0; i < NUM_TX_DMA; i++) {
  1007. aup->tx_dma_ring[i] =
  1008. (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
  1009. }
  1010. }
/* Per-interface configuration filled in by au1000_init_module(). */
static struct {
	int port;		/* NOTE(review): never written in the visible code */
	u32 base_addr;		/* MAC register base address */
	u32 macen_addr;		/* MAC enable register address */
	int irq;		/* MAC DMA interrupt number */
	struct net_device *dev;	/* device returned by au1000_probe() */
} iflist[2];

/* number of iflist[] entries in use for this SoC */
static int num_ifs;
/*
 * Setup the base address and interrupt of the Au1xxx ethernet macs
 * based on cpu type and whether the interface is enabled in the
 * sys_pinfunc register. The last interface is enabled if SYS_PF_NI2
 * (bit 4) is 0.
 */
/*
 * Module entry point: fill iflist[] with the per-SoC MAC base
 * addresses and DMA IRQs, then probe each enabled interface.
 * Returns 0 if at least one MAC probed, -ENODEV otherwise.
 */
static int __init au1000_init_module(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	/* SYS_PF_NI2 (bit 4) set means the last interface is disabled */
	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
	struct net_device *dev;
	int i, found_one = 0;

	switch (c->cputype) {
#ifdef CONFIG_SOC_AU1000
	case CPU_AU1000:
		num_ifs = 2 - ni;
		iflist[0].base_addr = AU1000_ETH0_BASE;
		iflist[1].base_addr = AU1000_ETH1_BASE;
		iflist[0].macen_addr = AU1000_MAC0_ENABLE;
		iflist[1].macen_addr = AU1000_MAC1_ENABLE;
		iflist[0].irq = AU1000_MAC0_DMA_INT;
		iflist[1].irq = AU1000_MAC1_DMA_INT;
		break;
#endif
#ifdef CONFIG_SOC_AU1100
	case CPU_AU1100:
		/* Au1100 has a single MAC */
		num_ifs = 1 - ni;
		iflist[0].base_addr = AU1100_ETH0_BASE;
		iflist[0].macen_addr = AU1100_MAC0_ENABLE;
		iflist[0].irq = AU1100_MAC0_DMA_INT;
		break;
#endif
#ifdef CONFIG_SOC_AU1500
	case CPU_AU1500:
		num_ifs = 2 - ni;
		iflist[0].base_addr = AU1500_ETH0_BASE;
		iflist[1].base_addr = AU1500_ETH1_BASE;
		iflist[0].macen_addr = AU1500_MAC0_ENABLE;
		iflist[1].macen_addr = AU1500_MAC1_ENABLE;
		iflist[0].irq = AU1500_MAC0_DMA_INT;
		iflist[1].irq = AU1500_MAC1_DMA_INT;
		break;
#endif
#ifdef CONFIG_SOC_AU1550
	case CPU_AU1550:
		num_ifs = 2 - ni;
		iflist[0].base_addr = AU1550_ETH0_BASE;
		iflist[1].base_addr = AU1550_ETH1_BASE;
		iflist[0].macen_addr = AU1550_MAC0_ENABLE;
		iflist[1].macen_addr = AU1550_MAC1_ENABLE;
		iflist[0].irq = AU1550_MAC0_DMA_INT;
		iflist[1].irq = AU1550_MAC1_DMA_INT;
		break;
#endif
	default:
		num_ifs = 0;
	}
	for(i = 0; i < num_ifs; i++) {
		dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
		iflist[i].dev = dev;
		if (dev)
			found_one++;
	}
	if (!found_one)
		return -ENODEV;
	return 0;
}
  1085. static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
  1086. {
  1087. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1088. u16 ctl, adv;
  1089. /* Setup standard advertise */
  1090. adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
  1091. adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
  1092. if (advertise & ADVERTISED_10baseT_Half)
  1093. adv |= ADVERTISE_10HALF;
  1094. if (advertise & ADVERTISED_10baseT_Full)
  1095. adv |= ADVERTISE_10FULL;
  1096. if (advertise & ADVERTISED_100baseT_Half)
  1097. adv |= ADVERTISE_100HALF;
  1098. if (advertise & ADVERTISED_100baseT_Full)
  1099. adv |= ADVERTISE_100FULL;
  1100. mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
  1101. /* Start/Restart aneg */
  1102. ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
  1103. ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1104. mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
  1105. return 0;
  1106. }
/*
 * Disable autonegotiation and force the PHY to the given speed and
 * duplex.  Only 10 and 100 Mbit are accepted; returns -EINVAL for
 * anything else (including SPEED_1000), 0 on success.
 */
static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 ctl;

	ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);

	/* First reset the PHY */
	/* NOTE(review): there is no poll for BMCR_RESET to self-clear
	 * before the mode write below -- confirm target PHYs tolerate
	 * a write while reset is in progress. */
	mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);

	/* Select speed & duplex */
	switch (speed) {
	case SPEED_10:
		break;	/* BMCR_SPEED100 stays clear */
	case SPEED_100:
		ctl |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		return -EINVAL;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
	return 0;
}
  1131. static void
  1132. au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
  1133. {
  1134. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1135. u32 advertise;
  1136. int autoneg;
  1137. int forced_speed;
  1138. int forced_duplex;
  1139. /* Default advertise */
  1140. advertise = GENMII_DEFAULT_ADVERTISE;
  1141. autoneg = aup->want_autoneg;
  1142. forced_speed = SPEED_100;
  1143. forced_duplex = DUPLEX_FULL;
  1144. /* Setup link parameters */
  1145. if (cmd) {
  1146. if (cmd->autoneg == AUTONEG_ENABLE) {
  1147. advertise = cmd->advertising;
  1148. autoneg = 1;
  1149. } else {
  1150. autoneg = 0;
  1151. forced_speed = cmd->speed;
  1152. forced_duplex = cmd->duplex;
  1153. }
  1154. }
  1155. /* Configure PHY & start aneg */
  1156. aup->want_autoneg = autoneg;
  1157. if (autoneg)
  1158. au1000_setup_aneg(dev, advertise);
  1159. else
  1160. au1000_setup_forced(dev, forced_speed, forced_duplex);
  1161. mod_timer(&aup->timer, jiffies + HZ);
  1162. }
/*
 * ethtool get_settings: report the generic-MII feature set and the
 * current link state as seen by the PHY-specific status op.
 * NOTE(review): cmd->speed is left untouched when the PHY reports a
 * port type other than 10BASE-T / 100BASE-TX / 100BASE-FX.
 */
static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 link, speed;

	cmd->supported = GENMII_DEFAULT_FEATURES;
	cmd->advertising = GENMII_DEFAULT_ADVERTISE;
	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->phy_address = aup->phy_addr;

	spin_lock_irq(&aup->lock);
	cmd->autoneg = aup->want_autoneg;
	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
	if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
		cmd->speed = SPEED_100;
	else if (speed == IF_PORT_10BASET)
		cmd->speed = SPEED_10;
	/* only 100BASE-FX with link up is reported as full duplex */
	if (link && (dev->if_port == IF_PORT_100BASEFX))
		cmd->duplex = DUPLEX_FULL;
	else
		cmd->duplex = DUPLEX_HALF;
	spin_unlock_irq(&aup->lock);
	return 0;
}
  1186. static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1187. {
  1188. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1189. unsigned long features = GENMII_DEFAULT_FEATURES;
  1190. if (!capable(CAP_NET_ADMIN))
  1191. return -EPERM;
  1192. if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
  1193. return -EINVAL;
  1194. if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
  1195. return -EINVAL;
  1196. if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
  1197. return -EINVAL;
  1198. if (cmd->autoneg == AUTONEG_DISABLE)
  1199. switch (cmd->speed) {
  1200. case SPEED_10:
  1201. if (cmd->duplex == DUPLEX_HALF &&
  1202. (features & SUPPORTED_10baseT_Half) == 0)
  1203. return -EINVAL;
  1204. if (cmd->duplex == DUPLEX_FULL &&
  1205. (features & SUPPORTED_10baseT_Full) == 0)
  1206. return -EINVAL;
  1207. break;
  1208. case SPEED_100:
  1209. if (cmd->duplex == DUPLEX_HALF &&
  1210. (features & SUPPORTED_100baseT_Half) == 0)
  1211. return -EINVAL;
  1212. if (cmd->duplex == DUPLEX_FULL &&
  1213. (features & SUPPORTED_100baseT_Full) == 0)
  1214. return -EINVAL;
  1215. break;
  1216. default:
  1217. return -EINVAL;
  1218. }
  1219. else if ((features & SUPPORTED_Autoneg) == 0)
  1220. return -EINVAL;
  1221. spin_lock_irq(&aup->lock);
  1222. au1000_start_link(dev, cmd);
  1223. spin_unlock_irq(&aup->lock);
  1224. return 0;
  1225. }
  1226. static int au1000_nway_reset(struct net_device *dev)
  1227. {
  1228. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1229. if (!aup->want_autoneg)
  1230. return -EINVAL;
  1231. spin_lock_irq(&aup->lock);
  1232. au1000_start_link(dev, NULL);
  1233. spin_unlock_irq(&aup->lock);
  1234. return 0;
  1235. }
  1236. static void
  1237. au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1238. {
  1239. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1240. strcpy(info->driver, DRV_NAME);
  1241. strcpy(info->version, DRV_VERSION);
  1242. info->fw_version[0] = '\0';
  1243. sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
  1244. info->regdump_len = 0;
  1245. }
/* ethtool get_link: report the carrier state tracked by au1000_timer(). */
static u32 au1000_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev);
}
/* ethtool entry points (wired up via SET_ETHTOOL_OPS in au1000_probe). */
static struct ethtool_ops au1000_ethtool_ops = {
	.get_settings = au1000_get_settings,
	.set_settings = au1000_set_settings,
	.get_drvinfo = au1000_get_drvinfo,
	.nway_reset = au1000_nway_reset,
	.get_link = au1000_get_link
};
  1257. static struct net_device *
  1258. au1000_probe(u32 ioaddr, int irq, int port_num)
  1259. {
  1260. static unsigned version_printed = 0;
  1261. struct au1000_private *aup = NULL;
  1262. struct net_device *dev = NULL;
  1263. db_dest_t *pDB, *pDBfree;
  1264. char *pmac, *argptr;
  1265. char ethaddr[6];
  1266. int i, err;
  1267. if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
  1268. return NULL;
  1269. if (version_printed++ == 0)
  1270. printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
  1271. dev = alloc_etherdev(sizeof(struct au1000_private));
  1272. if (!dev) {
  1273. printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
  1274. return NULL;
  1275. }
  1276. if ((err = register_netdev(dev))) {
  1277. printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
  1278. err);
  1279. free_netdev(dev);
  1280. return NULL;
  1281. }
  1282. printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
  1283. dev->name, ioaddr, irq);
  1284. aup = dev->priv;
  1285. /* Allocate the data buffers */
  1286. /* Snooping works fine with eth on all au1xxx */
  1287. aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
  1288. MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
  1289. &aup->dma_addr,
  1290. 0);
  1291. if (!aup->vaddr) {
  1292. free_netdev(dev);
  1293. release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
  1294. return NULL;
  1295. }
  1296. /* aup->mac is the base address of the MAC's registers */
  1297. aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
  1298. /* Setup some variables for quick register address access */
  1299. if (ioaddr == iflist[0].base_addr)
  1300. {
  1301. /* check env variables first */
  1302. if (!get_ethernet_addr(ethaddr)) {
  1303. memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
  1304. } else {
  1305. /* Check command line */
  1306. argptr = prom_getcmdline();
  1307. if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
  1308. printk(KERN_INFO "%s: No mac address found\n",
  1309. dev->name);
  1310. /* use the hard coded mac addresses */
  1311. } else {
  1312. str2eaddr(ethaddr, pmac + strlen("ethaddr="));
  1313. memcpy(au1000_mac_addr, ethaddr,
  1314. sizeof(au1000_mac_addr));
  1315. }
  1316. }
  1317. aup->enable = (volatile u32 *)
  1318. ((unsigned long)iflist[0].macen_addr);
  1319. memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
  1320. setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
  1321. aup->mac_id = 0;
  1322. au_macs[0] = aup;
  1323. }
  1324. else
  1325. if (ioaddr == iflist[1].base_addr)
  1326. {
  1327. aup->enable = (volatile u32 *)
  1328. ((unsigned long)iflist[1].macen_addr);
  1329. memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
  1330. dev->dev_addr[4] += 0x10;
  1331. setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
  1332. aup->mac_id = 1;
  1333. au_macs[1] = aup;
  1334. }
  1335. else
  1336. {
  1337. printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
  1338. }
  1339. /* bring the device out of reset, otherwise probing the mii
  1340. * will hang */
  1341. *aup->enable = MAC_EN_CLOCK_ENABLE;
  1342. au_sync_delay(2);
  1343. *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
  1344. MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
  1345. au_sync_delay(2);
  1346. aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
  1347. if (!aup->mii) {
  1348. printk(KERN_ERR "%s: out of memory\n", dev->name);
  1349. goto err_out;
  1350. }
  1351. aup->mii->mii_control_reg = 0;
  1352. aup->mii->mii_data_reg = 0;
  1353. if (mii_probe(dev) != 0) {
  1354. goto err_out;
  1355. }
  1356. pDBfree = NULL;
  1357. /* setup the data buffer descriptors and attach a buffer to each one */
  1358. pDB = aup->db;
  1359. for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
  1360. pDB->pnext = pDBfree;
  1361. pDBfree = pDB;
  1362. pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
  1363. pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
  1364. pDB++;
  1365. }
  1366. aup->pDBfree = pDBfree;
  1367. for (i = 0; i < NUM_RX_DMA; i++) {
  1368. pDB = GetFreeDB(aup);
  1369. if (!pDB) {
  1370. goto err_out;
  1371. }
  1372. aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
  1373. aup->rx_db_inuse[i] = pDB;
  1374. }
  1375. for (i = 0; i < NUM_TX_DMA; i++) {
  1376. pDB = GetFreeDB(aup);
  1377. if (!pDB) {
  1378. goto err_out;
  1379. }
  1380. aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
  1381. aup->tx_dma_ring[i]->len = 0;
  1382. aup->tx_db_inuse[i] = pDB;
  1383. }
  1384. spin_lock_init(&aup->lock);
  1385. dev->base_addr = ioaddr;
  1386. dev->irq = irq;
  1387. dev->open = au1000_open;
  1388. dev->hard_start_xmit = au1000_tx;
  1389. dev->stop = au1000_close;
  1390. dev->get_stats = au1000_get_stats;
  1391. dev->set_multicast_list = &set_rx_mode;
  1392. dev->do_ioctl = &au1000_ioctl;
  1393. SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
  1394. dev->set_config = &au1000_set_config;
  1395. dev->tx_timeout = au1000_tx_timeout;
  1396. dev->watchdog_timeo = ETH_TX_TIMEOUT;
  1397. /*
  1398. * The boot code uses the ethernet controller, so reset it to start
  1399. * fresh. au1000_init() expects that the device is in reset state.
  1400. */
  1401. reset_mac(dev);
  1402. return dev;
  1403. err_out:
  1404. /* here we should have a valid dev plus aup-> register addresses
  1405. * so we can reset the mac properly.*/
  1406. reset_mac(dev);
  1407. if (aup->mii)
  1408. kfree(aup->mii);
  1409. for (i = 0; i < NUM_RX_DMA; i++) {
  1410. if (aup->rx_db_inuse[i])
  1411. ReleaseDB(aup, aup->rx_db_inuse[i]);
  1412. }
  1413. for (i = 0; i < NUM_TX_DMA; i++) {
  1414. if (aup->tx_db_inuse[i])
  1415. ReleaseDB(aup, aup->tx_db_inuse[i]);
  1416. }
  1417. dma_free_noncoherent(NULL,
  1418. MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
  1419. (void *)aup->vaddr,
  1420. aup->dma_addr);
  1421. unregister_netdev(dev);
  1422. free_netdev(dev);
  1423. release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
  1424. return NULL;
  1425. }
  1426. /*
  1427. * Initialize the interface.
  1428. *
  1429. * When the device powers up, the clocks are disabled and the
  1430. * mac is in reset state. When the interface is closed, we
  1431. * do the same -- reset the device and disable the clocks to
  1432. * conserve power. Thus, whenever au1000_init() is called,
  1433. * the device should already be in reset state.
  1434. */
  1435. static int au1000_init(struct net_device *dev)
  1436. {
  1437. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1438. u32 flags;
  1439. int i;
  1440. u32 control;
  1441. u16 link, speed;
  1442. if (au1000_debug > 4)
  1443. printk("%s: au1000_init\n", dev->name);
  1444. spin_lock_irqsave(&aup->lock, flags);
  1445. /* bring the device out of reset */
  1446. *aup->enable = MAC_EN_CLOCK_ENABLE;
  1447. au_sync_delay(2);
  1448. *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
  1449. MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
  1450. au_sync_delay(20);
  1451. aup->mac->control = 0;
  1452. aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
  1453. aup->tx_tail = aup->tx_head;
  1454. aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
  1455. aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
  1456. aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
  1457. dev->dev_addr[1]<<8 | dev->dev_addr[0];
  1458. for (i = 0; i < NUM_RX_DMA; i++) {
  1459. aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
  1460. }
  1461. au_sync();
  1462. aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
  1463. control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
  1464. #ifndef CONFIG_CPU_LITTLE_ENDIAN
  1465. control |= MAC_BIG_ENDIAN;
  1466. #endif
  1467. if (link && (dev->if_port == IF_PORT_100BASEFX)) {
  1468. control |= MAC_FULL_DUPLEX;
  1469. }
  1470. aup->mac->control = control;
  1471. aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
  1472. au_sync();
  1473. spin_unlock_irqrestore(&aup->lock, flags);
  1474. return 0;
  1475. }
  1476. static void au1000_timer(unsigned long data)
  1477. {
  1478. struct net_device *dev = (struct net_device *)data;
  1479. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1480. unsigned char if_port;
  1481. u16 link, speed;
  1482. if (!dev) {
  1483. /* fatal error, don't restart the timer */
  1484. printk(KERN_ERR "au1000_timer error: NULL dev\n");
  1485. return;
  1486. }
  1487. if_port = dev->if_port;
  1488. if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
  1489. if (link) {
  1490. if (!netif_carrier_ok(dev)) {
  1491. netif_carrier_on(dev);
  1492. printk(KERN_INFO "%s: link up\n", dev->name);
  1493. }
  1494. }
  1495. else {
  1496. if (netif_carrier_ok(dev)) {
  1497. netif_carrier_off(dev);
  1498. dev->if_port = 0;
  1499. printk(KERN_INFO "%s: link down\n", dev->name);
  1500. }
  1501. }
  1502. }
  1503. if (link && (dev->if_port != if_port) &&
  1504. (dev->if_port != IF_PORT_UNKNOWN)) {
  1505. hard_stop(dev);
  1506. if (dev->if_port == IF_PORT_100BASEFX) {
  1507. printk(KERN_INFO "%s: going to full duplex\n",
  1508. dev->name);
  1509. aup->mac->control |= MAC_FULL_DUPLEX;
  1510. au_sync_delay(1);
  1511. }
  1512. else {
  1513. aup->mac->control &= ~MAC_FULL_DUPLEX;
  1514. au_sync_delay(1);
  1515. }
  1516. enable_rx_tx(dev);
  1517. }
  1518. aup->timer.expires = RUN_AT((1*HZ));
  1519. aup->timer.data = (unsigned long)dev;
  1520. aup->timer.function = &au1000_timer; /* timer handler */
  1521. add_timer(&aup->timer);
  1522. }
  1523. static int au1000_open(struct net_device *dev)
  1524. {
  1525. int retval;
  1526. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1527. if (au1000_debug > 4)
  1528. printk("%s: open: dev=%p\n", dev->name, dev);
  1529. if ((retval = au1000_init(dev))) {
  1530. printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
  1531. free_irq(dev->irq, dev);
  1532. return retval;
  1533. }
  1534. netif_start_queue(dev);
  1535. if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
  1536. dev->name, dev))) {
  1537. printk(KERN_ERR "%s: unable to get IRQ %d\n",
  1538. dev->name, dev->irq);
  1539. return retval;
  1540. }
  1541. init_timer(&aup->timer); /* used in ioctl() */
  1542. aup->timer.expires = RUN_AT((3*HZ));
  1543. aup->timer.data = (unsigned long)dev;
  1544. aup->timer.function = &au1000_timer; /* timer handler */
  1545. add_timer(&aup->timer);
  1546. if (au1000_debug > 4)
  1547. printk("%s: open: Initialization done.\n", dev->name);
  1548. return 0;
  1549. }
  1550. static int au1000_close(struct net_device *dev)
  1551. {
  1552. u32 flags;
  1553. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1554. if (au1000_debug > 4)
  1555. printk("%s: close: dev=%p\n", dev->name, dev);
  1556. reset_mac(dev);
  1557. spin_lock_irqsave(&aup->lock, flags);
  1558. /* stop the device */
  1559. netif_stop_queue(dev);
  1560. /* disable the interrupt */
  1561. free_irq(dev->irq, dev);
  1562. spin_unlock_irqrestore(&aup->lock, flags);
  1563. return 0;
  1564. }
/*
 * Module exit: unregister each probed device and undo au1000_probe's
 * allocations -- mii struct, in-use DMA buffer descriptors, the
 * noncoherent buffer pool, the netdev itself, and the MMIO region.
 */
static void __exit au1000_cleanup_module(void)
{
	int i, j;
	struct net_device *dev;
	struct au1000_private *aup;

	for (i = 0; i < num_ifs; i++) {
		dev = iflist[i].dev;
		if (dev) {
			aup = (struct au1000_private *) dev->priv;
			unregister_netdev(dev);
			if (aup->mii)
				kfree(aup->mii);
			for (j = 0; j < NUM_RX_DMA; j++) {
				if (aup->rx_db_inuse[j])
					ReleaseDB(aup, aup->rx_db_inuse[j]);
			}
			for (j = 0; j < NUM_TX_DMA; j++) {
				if (aup->tx_db_inuse[j])
					ReleaseDB(aup, aup->tx_db_inuse[j]);
			}
			dma_free_noncoherent(NULL,
					MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
					(void *)aup->vaddr,
					aup->dma_addr);
			free_netdev(dev);
			release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
		}
	}
}
  1594. static inline void
  1595. update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
  1596. {
  1597. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1598. struct net_device_stats *ps = &aup->stats;
  1599. ps->tx_packets++;
  1600. ps->tx_bytes += pkt_len;
  1601. if (status & TX_FRAME_ABORTED) {
  1602. if (dev->if_port == IF_PORT_100BASEFX) {
  1603. if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
  1604. /* any other tx errors are only valid
  1605. * in half duplex mode */
  1606. ps->tx_errors++;
  1607. ps->tx_aborted_errors++;
  1608. }
  1609. }
  1610. else {
  1611. ps->tx_errors++;
  1612. ps->tx_aborted_errors++;
  1613. if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
  1614. ps->tx_carrier_errors++;
  1615. }
  1616. }
  1617. }
/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits. This is a must if the irq is setup as
 * edge triggered.
 */
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	/* Walk the ring from tx_tail, retiring every descriptor the
	 * hardware has marked done. */
	while (ptxd->buff_stat & TX_T_DONE) {
		/* low 10 bits of len hold the transmitted byte count */
		update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		au_sync();	/* flush the descriptor write before advancing */

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		/* a slot just freed up: restart the queue if au1000_tx()
		 * had stalled it on a full ring */
		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}
  1641. /*
  1642. * Au1000 transmit routine.
  1643. */
  1644. static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
  1645. {
  1646. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1647. volatile tx_dma_t *ptxd;
  1648. u32 buff_stat;
  1649. db_dest_t *pDB;
  1650. int i;
  1651. if (au1000_debug > 5)
  1652. printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
  1653. dev->name, (unsigned)aup, skb->len,
  1654. skb->data, aup->tx_head);
  1655. ptxd = aup->tx_dma_ring[aup->tx_head];
  1656. buff_stat = ptxd->buff_stat;
  1657. if (buff_stat & TX_DMA_ENABLE) {
  1658. /* We've wrapped around and the transmitter is still busy */
  1659. netif_stop_queue(dev);
  1660. aup->tx_full = 1;
  1661. return 1;
  1662. }
  1663. else if (buff_stat & TX_T_DONE) {
  1664. update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
  1665. ptxd->len = 0;
  1666. }
  1667. if (aup->tx_full) {
  1668. aup->tx_full = 0;
  1669. netif_wake_queue(dev);
  1670. }
  1671. pDB = aup->tx_db_inuse[aup->tx_head];
  1672. memcpy((void *)pDB->vaddr, skb->data, skb->len);
  1673. if (skb->len < ETH_ZLEN) {
  1674. for (i=skb->len; i<ETH_ZLEN; i++) {
  1675. ((char *)pDB->vaddr)[i] = 0;
  1676. }
  1677. ptxd->len = ETH_ZLEN;
  1678. }
  1679. else
  1680. ptxd->len = skb->len;
  1681. ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
  1682. au_sync();
  1683. dev_kfree_skb(skb);
  1684. aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
  1685. dev->trans_start = jiffies;
  1686. return 0;
  1687. }
  1688. static inline void update_rx_stats(struct net_device *dev, u32 status)
  1689. {
  1690. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1691. struct net_device_stats *ps = &aup->stats;
  1692. ps->rx_packets++;
  1693. if (status & RX_MCAST_FRAME)
  1694. ps->multicast++;
  1695. if (status & RX_ERROR) {
  1696. ps->rx_errors++;
  1697. if (status & RX_MISSED_FRAME)
  1698. ps->rx_missed_errors++;
  1699. if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
  1700. ps->rx_length_errors++;
  1701. if (status & RX_CRC_ERROR)
  1702. ps->rx_crc_errors++;
  1703. if (status & RX_COLL)
  1704. ps->collisions++;
  1705. }
  1706. else
  1707. ps->rx_bytes += status & RX_FRAME_LEN_MASK;
  1708. }
  1709. /*
  1710. * Au1000 receive routine.
  1711. */
  1712. static int au1000_rx(struct net_device *dev)
  1713. {
  1714. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1715. struct sk_buff *skb;
  1716. volatile rx_dma_t *prxd;
  1717. u32 buff_stat, status;
  1718. db_dest_t *pDB;
  1719. u32 frmlen;
  1720. if (au1000_debug > 5)
  1721. printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
  1722. prxd = aup->rx_dma_ring[aup->rx_head];
  1723. buff_stat = prxd->buff_stat;
  1724. while (buff_stat & RX_T_DONE) {
  1725. status = prxd->status;
  1726. pDB = aup->rx_db_inuse[aup->rx_head];
  1727. update_rx_stats(dev, status);
  1728. if (!(status & RX_ERROR)) {
  1729. /* good frame */
  1730. frmlen = (status & RX_FRAME_LEN_MASK);
  1731. frmlen -= 4; /* Remove FCS */
  1732. skb = dev_alloc_skb(frmlen + 2);
  1733. if (skb == NULL) {
  1734. printk(KERN_ERR
  1735. "%s: Memory squeeze, dropping packet.\n",
  1736. dev->name);
  1737. aup->stats.rx_dropped++;
  1738. continue;
  1739. }
  1740. skb->dev = dev;
  1741. skb_reserve(skb, 2); /* 16 byte IP header align */
  1742. eth_copy_and_sum(skb,
  1743. (unsigned char *)pDB->vaddr, frmlen, 0);
  1744. skb_put(skb, frmlen);
  1745. skb->protocol = eth_type_trans(skb, dev);
  1746. netif_rx(skb); /* pass the packet to upper layers */
  1747. }
  1748. else {
  1749. if (au1000_debug > 4) {
  1750. if (status & RX_MISSED_FRAME)
  1751. printk("rx miss\n");
  1752. if (status & RX_WDOG_TIMER)
  1753. printk("rx wdog\n");
  1754. if (status & RX_RUNT)
  1755. printk("rx runt\n");
  1756. if (status & RX_OVERLEN)
  1757. printk("rx overlen\n");
  1758. if (status & RX_COLL)
  1759. printk("rx coll\n");
  1760. if (status & RX_MII_ERROR)
  1761. printk("rx mii error\n");
  1762. if (status & RX_CRC_ERROR)
  1763. printk("rx crc error\n");
  1764. if (status & RX_LEN_ERROR)
  1765. printk("rx len error\n");
  1766. if (status & RX_U_CNTRL_FRAME)
  1767. printk("rx u control frame\n");
  1768. if (status & RX_MISSED_FRAME)
  1769. printk("rx miss\n");
  1770. }
  1771. }
  1772. prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
  1773. aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
  1774. au_sync();
  1775. /* next descriptor */
  1776. prxd = aup->rx_dma_ring[aup->rx_head];
  1777. buff_stat = prxd->buff_stat;
  1778. dev->last_rx = jiffies;
  1779. }
  1780. return 0;
  1781. }
  1782. /*
  1783. * Au1000 interrupt service routine.
  1784. */
  1785. static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
  1786. {
  1787. struct net_device *dev = (struct net_device *) dev_id;
  1788. if (dev == NULL) {
  1789. printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
  1790. return IRQ_RETVAL(1);
  1791. }
  1792. /* Handle RX interrupts first to minimize chance of overrun */
  1793. au1000_rx(dev);
  1794. au1000_tx_ack(dev);
  1795. return IRQ_RETVAL(1);
  1796. }
/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter must be hung?
 */
static void au1000_tx_timeout(struct net_device *dev)
{
	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
	/* order matters: quiesce the MAC before reinitializing it,
	 * and only then restart the transmit queue */
	reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
  1809. static unsigned const ethernet_polynomial = 0x04c11db7U;
  1810. static inline u32 ether_crc(int length, unsigned char *data)
  1811. {
  1812. int crc = -1;
  1813. while(--length >= 0) {
  1814. unsigned char current_octet = *data++;
  1815. int bit;
  1816. for (bit = 0; bit < 8; bit++, current_octet >>= 1)
  1817. crc = (crc << 1) ^
  1818. ((crc < 0) ^ (current_octet & 1) ?
  1819. ethernet_polynomial : 0);
  1820. }
  1821. return crc;
  1822. }
  1823. static void set_rx_mode(struct net_device *dev)
  1824. {
  1825. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1826. if (au1000_debug > 4)
  1827. printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
  1828. if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
  1829. aup->mac->control |= MAC_PROMISCUOUS;
  1830. printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
  1831. } else if ((dev->flags & IFF_ALLMULTI) ||
  1832. dev->mc_count > MULTICAST_FILTER_LIMIT) {
  1833. aup->mac->control |= MAC_PASS_ALL_MULTI;
  1834. aup->mac->control &= ~MAC_PROMISCUOUS;
  1835. printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
  1836. } else {
  1837. int i;
  1838. struct dev_mc_list *mclist;
  1839. u32 mc_filter[2]; /* Multicast hash filter */
  1840. mc_filter[1] = mc_filter[0] = 0;
  1841. for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
  1842. i++, mclist = mclist->next) {
  1843. set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
  1844. (long *)mc_filter);
  1845. }
  1846. aup->mac->multi_hash_high = mc_filter[1];
  1847. aup->mac->multi_hash_low = mc_filter[0];
  1848. aup->mac->control &= ~MAC_PROMISCUOUS;
  1849. aup->mac->control |= MAC_HASH_MODE;
  1850. }
  1851. }
/*
 * ioctl handler: implements the MII ioctls (get PHY address, read
 * and write a PHY register) plus their legacy SIOCDEVPRIVATE
 * equivalents.  rq->ifr_ifru is reinterpreted as the u16 array
 * layout the MII ioctls use: data[0]=phy, data[1]=reg, data[2]=val
 * in, data[3]=val out.
 */
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 *data = (u16 *)&rq->ifr_ifru;

	switch(cmd) {
	case SIOCDEVPRIVATE:	/* Get the address of the PHY in use. */
	case SIOCGMIIPHY:
		if (!netif_running(dev)) return -EINVAL;
		data[0] = aup->phy_addr;
		/* NOTE(review): falls through into the register read —
		 * presumably intentional (classic MII pattern: GMIIPHY
		 * also returns a register value); confirm before adding
		 * a return here */
	case SIOCDEVPRIVATE+1:	/* Read the specified MII register. */
	case SIOCGMIIREG:
		data[3] = mdio_read(dev, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE+2:	/* Write the specified MII register */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(dev, data[0], data[1],data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
  1875. static int au1000_set_config(struct net_device *dev, struct ifmap *map)
  1876. {
  1877. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1878. u16 control;
  1879. if (au1000_debug > 4) {
  1880. printk("%s: set_config called: dev->if_port %d map->port %x\n",
  1881. dev->name, dev->if_port, map->port);
  1882. }
  1883. switch(map->port){
  1884. case IF_PORT_UNKNOWN: /* use auto here */
  1885. printk(KERN_INFO "%s: config phy for aneg\n",
  1886. dev->name);
  1887. dev->if_port = map->port;
  1888. /* Link Down: the timer will bring it up */
  1889. netif_carrier_off(dev);
  1890. /* read current control */
  1891. control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
  1892. control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
  1893. /* enable auto negotiation and reset the negotiation */
  1894. mdio_write(dev, aup->phy_addr, MII_CONTROL,
  1895. control | MII_CNTL_AUTO |
  1896. MII_CNTL_RST_AUTO);
  1897. break;
  1898. case IF_PORT_10BASET: /* 10BaseT */
  1899. printk(KERN_INFO "%s: config phy for 10BaseT\n",
  1900. dev->name);
  1901. dev->if_port = map->port;
  1902. /* Link Down: the timer will bring it up */
  1903. netif_carrier_off(dev);
  1904. /* set Speed to 10Mbps, Half Duplex */
  1905. control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
  1906. control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
  1907. MII_CNTL_FDX);
  1908. /* disable auto negotiation and force 10M/HD mode*/
  1909. mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
  1910. break;
  1911. case IF_PORT_100BASET: /* 100BaseT */
  1912. case IF_PORT_100BASETX: /* 100BaseTx */
  1913. printk(KERN_INFO "%s: config phy for 100BaseTX\n",
  1914. dev->name);
  1915. dev->if_port = map->port;
  1916. /* Link Down: the timer will bring it up */
  1917. netif_carrier_off(dev);
  1918. /* set Speed to 100Mbps, Half Duplex */
  1919. /* disable auto negotiation and enable 100MBit Mode */
  1920. control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
  1921. control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
  1922. control |= MII_CNTL_F100;
  1923. mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
  1924. break;
  1925. case IF_PORT_100BASEFX: /* 100BaseFx */
  1926. printk(KERN_INFO "%s: config phy for 100BaseFX\n",
  1927. dev->name);
  1928. dev->if_port = map->port;
  1929. /* Link Down: the timer will bring it up */
  1930. netif_carrier_off(dev);
  1931. /* set Speed to 100Mbps, Full Duplex */
  1932. /* disable auto negotiation and enable 100MBit Mode */
  1933. control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
  1934. control &= ~MII_CNTL_AUTO;
  1935. control |= MII_CNTL_F100 | MII_CNTL_FDX;
  1936. mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
  1937. break;
  1938. case IF_PORT_10BASE2: /* 10Base2 */
  1939. case IF_PORT_AUI: /* AUI */
  1940. /* These Modes are not supported (are they?)*/
  1941. printk(KERN_ERR "%s: 10Base2/AUI not supported",
  1942. dev->name);
  1943. return -EOPNOTSUPP;
  1944. break;
  1945. default:
  1946. printk(KERN_ERR "%s: Invalid media selected",
  1947. dev->name);
  1948. return -EINVAL;
  1949. }
  1950. return 0;
  1951. }
  1952. static struct net_device_stats *au1000_get_stats(struct net_device *dev)
  1953. {
  1954. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  1955. if (au1000_debug > 4)
  1956. printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
  1957. if (netif_device_present(dev)) {
  1958. return &aup->stats;
  1959. }
  1960. return 0;
  1961. }
/* module entry and exit points */
module_init(au1000_init_module);
module_exit(au1000_cleanup_module);