au1000_eth.c

  1. /*
  2. *
  3. * Alchemy Au1x00 ethernet driver
  4. *
  5. * Copyright 2001,2002,2003 MontaVista Software Inc.
  6. * Copyright 2002 TimeSys Corp.
  7. * Added ethtool/mii-tool support,
  8. * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
  9. * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
  10. * or riemer@riemer-nt.de: fixed the link beat detection with
  11. * ioctls (SIOCGMIIPHY)
  12. * Author: MontaVista Software, Inc.
  13. * ppopov@mvista.com or source@mvista.com
  14. *
  15. * ########################################################################
  16. *
  17. * This program is free software; you can redistribute it and/or modify it
  18. * under the terms of the GNU General Public License (Version 2) as
  19. * published by the Free Software Foundation.
  20. *
  21. * This program is distributed in the hope it will be useful, but WITHOUT
  22. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  23. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  24. * for more details.
  25. *
  26. * You should have received a copy of the GNU General Public License along
  27. * with this program; if not, write to the Free Software Foundation, Inc.,
  28. * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  29. *
  30. * ########################################################################
  31. *
  32. *
  33. */
  34. #include <linux/config.h>
  35. #include <linux/module.h>
  36. #include <linux/kernel.h>
  37. #include <linux/sched.h>
  38. #include <linux/string.h>
  39. #include <linux/timer.h>
  40. #include <linux/errno.h>
  41. #include <linux/in.h>
  42. #include <linux/ioport.h>
  43. #include <linux/bitops.h>
  44. #include <linux/slab.h>
  45. #include <linux/interrupt.h>
  46. #include <linux/pci.h>
  47. #include <linux/init.h>
  48. #include <linux/netdevice.h>
  49. #include <linux/etherdevice.h>
  50. #include <linux/ethtool.h>
  51. #include <linux/mii.h>
  52. #include <linux/skbuff.h>
  53. #include <linux/delay.h>
  54. #include <asm/mipsregs.h>
  55. #include <asm/irq.h>
  56. #include <asm/io.h>
  57. #include <asm/processor.h>
  58. #include <asm/mach-au1x00/au1000.h>
  59. #include <asm/cpu.h>
  60. #include "au1000_eth.h"
  61. #ifdef AU1000_ETH_DEBUG
  62. static int au1000_debug = 5;
  63. #else
  64. static int au1000_debug = 3;
  65. #endif
  66. #define DRV_NAME "au1000eth"
  67. #define DRV_VERSION "1.5"
  68. #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
  69. #define DRV_DESC "Au1xxx on-chip Ethernet driver"
  70. MODULE_AUTHOR(DRV_AUTHOR);
  71. MODULE_DESCRIPTION(DRV_DESC);
  72. MODULE_LICENSE("GPL");
  73. // prototypes
  74. static void hard_stop(struct net_device *);
  75. static void enable_rx_tx(struct net_device *dev);
  76. static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
  77. static int au1000_init(struct net_device *);
  78. static int au1000_open(struct net_device *);
  79. static int au1000_close(struct net_device *);
  80. static int au1000_tx(struct sk_buff *, struct net_device *);
  81. static int au1000_rx(struct net_device *);
  82. static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
  83. static void au1000_tx_timeout(struct net_device *);
  84. static int au1000_set_config(struct net_device *dev, struct ifmap *map);
  85. static void set_rx_mode(struct net_device *);
  86. static struct net_device_stats *au1000_get_stats(struct net_device *);
  87. static void au1000_timer(unsigned long);
  88. static int au1000_ioctl(struct net_device *, struct ifreq *, int);
  89. static int mdio_read(struct net_device *, int, int);
  90. static void mdio_write(struct net_device *, int, int, u16);
  91. static void dump_mii(struct net_device *dev, int phy_id);
  92. // externs
  93. extern void ack_rise_edge_irq(unsigned int);
  94. extern int get_ethernet_addr(char *ethernet_addr);
  95. extern void str2eaddr(unsigned char *ea, unsigned char *str);
  96. extern char * __init prom_getcmdline(void);
  97. /*
  98. * Theory of operation
  99. *
  100. * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
  101. * There are four receive and four transmit descriptors. These
  102. * descriptors are not in memory; rather, they are just a set of
  103. * hardware registers.
  104. *
  105. * Since the Au1000 has a coherent data cache, the receive and
  106. * transmit buffers are allocated from the KSEG0 segment. The
  107. * hardware registers, however, are still mapped at KSEG1 to
  108. * make sure there's no out-of-order writes, and that all writes
  109. * complete immediately.
  110. */
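The comment above leans on the MIPS fixed address segments: packet buffers live in cached KSEG0 space, while the MAC registers are touched through their uncached KSEG1 alias. A minimal illustrative sketch of that address arithmetic follows, using the standard macros from <asm/addrspace.h>; the function and its printout are not part of the driver.

#include <asm/addrspace.h>

/* Illustration only: given the KSEG1 register base the probe code is
 * handed (e.g. AU1000_ETH0_BASE), CPHYSADDR() recovers the physical
 * address used with request_mem_region(), and KSEG0ADDR()/KSEG1ADDR()
 * give the cached and uncached aliases of that physical address. */
static void show_mips_aliases(u32 ioaddr)
{
	u32 phys     = CPHYSADDR(ioaddr);   /* physical address       */
	u32 cached   = KSEG0ADDR(phys);     /* cached (KSEG0) alias   */
	u32 uncached = KSEG1ADDR(phys);     /* uncached (KSEG1) alias */

	printk(KERN_DEBUG "phys %08x cached %08x uncached %08x\n",
	       phys, cached, uncached);
}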
  111. /* These addresses are only used if yamon doesn't tell us what
  112. * the mac address is, and the mac address is not passed on the
  113. * command line.
  114. */
  115. static unsigned char au1000_mac_addr[6] __devinitdata = {
  116. 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
  117. };
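The external str2eaddr() declared below parses the "ethaddr=" command-line option into these six bytes. As a hedged sketch only -- the real helper lives in the board PROM code and its exact behaviour may differ -- a parser for the usual colon-separated form could look like this:

/* Hypothetical example, not the driver's str2eaddr(): parse a MAC
 * address written as "00:50:c2:0c:30:00" into six bytes. */
static void example_str2eaddr(unsigned char *ea, const char *str)
{
	char *end = (char *) str;
	int i;

	for (i = 0; i < 6; i++) {
		ea[i] = (unsigned char) simple_strtoul(end, &end, 16);
		if (*end == ':')
			end++;		/* skip the separator */
	}
}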
  118. #define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
  119. #define RUN_AT(x) (jiffies + (x))
  120. // For reading/writing 32-bit words from/to DMA memory
  121. #define cpu_to_dma32 cpu_to_be32
  122. #define dma32_to_cpu be32_to_cpu
  123. struct au1000_private *au_macs[NUM_ETH_INTERFACES];
  124. /* FIXME
  125. * All of the PHY code really should be detached from the MAC
  126. * code.
  127. */
  128. /* Default advertise */
  129. #define GENMII_DEFAULT_ADVERTISE \
  130. ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
  131. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
  132. ADVERTISED_Autoneg
  133. #define GENMII_DEFAULT_FEATURES \
  134. SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
  135. SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
  136. SUPPORTED_Autoneg
  137. int bcm_5201_init(struct net_device *dev, int phy_addr)
  138. {
  139. s16 data;
  140. /* Stop auto-negotiation */
  141. data = mdio_read(dev, phy_addr, MII_CONTROL);
  142. mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
  143. /* Set advertisement to 10/100 and Half/Full duplex
  144. * (full capabilities) */
  145. data = mdio_read(dev, phy_addr, MII_ANADV);
  146. data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
  147. mdio_write(dev, phy_addr, MII_ANADV, data);
  148. /* Restart auto-negotiation */
  149. data = mdio_read(dev, phy_addr, MII_CONTROL);
  150. data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
  151. mdio_write(dev, phy_addr, MII_CONTROL, data);
  152. if (au1000_debug > 4)
  153. dump_mii(dev, phy_addr);
  154. return 0;
  155. }
  156. int bcm_5201_reset(struct net_device *dev, int phy_addr)
  157. {
  158. s16 mii_control, timeout;
  159. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  160. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  161. mdelay(1);
  162. for (timeout = 100; timeout > 0; --timeout) {
  163. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  164. if ((mii_control & MII_CNTL_RESET) == 0)
  165. break;
  166. mdelay(1);
  167. }
  168. if (mii_control & MII_CNTL_RESET) {
  169. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  170. return -1;
  171. }
  172. return 0;
  173. }
  174. int
  175. bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  176. {
  177. u16 mii_data;
  178. struct au1000_private *aup;
  179. if (!dev) {
  180. printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
  181. return -1;
  182. }
  183. aup = (struct au1000_private *) dev->priv;
  184. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  185. if (mii_data & MII_STAT_LINK) {
  186. *link = 1;
  187. mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
  188. if (mii_data & MII_AUX_100) {
  189. if (mii_data & MII_AUX_FDX) {
  190. *speed = IF_PORT_100BASEFX;
  191. dev->if_port = IF_PORT_100BASEFX;
  192. }
  193. else {
  194. *speed = IF_PORT_100BASETX;
  195. dev->if_port = IF_PORT_100BASETX;
  196. }
  197. }
  198. else {
  199. *speed = IF_PORT_10BASET;
  200. dev->if_port = IF_PORT_10BASET;
  201. }
  202. }
  203. else {
  204. *link = 0;
  205. *speed = 0;
  206. dev->if_port = IF_PORT_UNKNOWN;
  207. }
  208. return 0;
  209. }
  210. int lsi_80227_init(struct net_device *dev, int phy_addr)
  211. {
  212. if (au1000_debug > 4)
  213. printk("lsi_80227_init\n");
  214. /* restart auto-negotiation */
  215. mdio_write(dev, phy_addr, MII_CONTROL,
  216. MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
  217. mdelay(1);
  218. /* set up LEDs to correct display */
  219. #ifdef CONFIG_MIPS_MTX1
  220. mdio_write(dev, phy_addr, 17, 0xff80);
  221. #else
  222. mdio_write(dev, phy_addr, 17, 0xffc0);
  223. #endif
  224. if (au1000_debug > 4)
  225. dump_mii(dev, phy_addr);
  226. return 0;
  227. }
  228. int lsi_80227_reset(struct net_device *dev, int phy_addr)
  229. {
  230. s16 mii_control, timeout;
  231. if (au1000_debug > 4) {
  232. printk("lsi_80227_reset\n");
  233. dump_mii(dev, phy_addr);
  234. }
  235. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  236. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  237. mdelay(1);
  238. for (timeout = 100; timeout > 0; --timeout) {
  239. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  240. if ((mii_control & MII_CNTL_RESET) == 0)
  241. break;
  242. mdelay(1);
  243. }
  244. if (mii_control & MII_CNTL_RESET) {
  245. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  246. return -1;
  247. }
  248. return 0;
  249. }
  250. int
  251. lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  252. {
  253. u16 mii_data;
  254. struct au1000_private *aup;
  255. if (!dev) {
  256. printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
  257. return -1;
  258. }
  259. aup = (struct au1000_private *) dev->priv;
  260. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  261. if (mii_data & MII_STAT_LINK) {
  262. *link = 1;
  263. mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
  264. if (mii_data & MII_LSI_PHY_STAT_SPD) {
  265. if (mii_data & MII_LSI_PHY_STAT_FDX) {
  266. *speed = IF_PORT_100BASEFX;
  267. dev->if_port = IF_PORT_100BASEFX;
  268. }
  269. else {
  270. *speed = IF_PORT_100BASETX;
  271. dev->if_port = IF_PORT_100BASETX;
  272. }
  273. }
  274. else {
  275. *speed = IF_PORT_10BASET;
  276. dev->if_port = IF_PORT_10BASET;
  277. }
  278. }
  279. else {
  280. *link = 0;
  281. *speed = 0;
  282. dev->if_port = IF_PORT_UNKNOWN;
  283. }
  284. return 0;
  285. }
  286. int am79c901_init(struct net_device *dev, int phy_addr)
  287. {
  288. printk("am79c901_init\n");
  289. return 0;
  290. }
  291. int am79c901_reset(struct net_device *dev, int phy_addr)
  292. {
  293. printk("am79c901_reset\n");
  294. return 0;
  295. }
  296. int
  297. am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  298. {
  299. return 0;
  300. }
  301. int am79c874_init(struct net_device *dev, int phy_addr)
  302. {
  303. s16 data;
  304. /* The 79c874 has bit assignments quite similar to the BCM5201 */
  305. if (au1000_debug > 4)
  306. printk("am79c874_init\n");
  307. /* Stop auto-negotiation */
  308. data = mdio_read(dev, phy_addr, MII_CONTROL);
  309. mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
  310. /* Set advertisement to 10/100 and Half/Full duplex
  311. * (full capabilities) */
  312. data = mdio_read(dev, phy_addr, MII_ANADV);
  313. data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
  314. mdio_write(dev, phy_addr, MII_ANADV, data);
  315. /* Restart auto-negotiation */
  316. data = mdio_read(dev, phy_addr, MII_CONTROL);
  317. data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
  318. mdio_write(dev, phy_addr, MII_CONTROL, data);
  319. if (au1000_debug > 4) dump_mii(dev, phy_addr);
  320. return 0;
  321. }
  322. int am79c874_reset(struct net_device *dev, int phy_addr)
  323. {
  324. s16 mii_control, timeout;
  325. if (au1000_debug > 4)
  326. printk("am79c874_reset\n");
  327. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  328. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  329. mdelay(1);
  330. for (timeout = 100; timeout > 0; --timeout) {
  331. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  332. if ((mii_control & MII_CNTL_RESET) == 0)
  333. break;
  334. mdelay(1);
  335. }
  336. if (mii_control & MII_CNTL_RESET) {
  337. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  338. return -1;
  339. }
  340. return 0;
  341. }
  342. int
  343. am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  344. {
  345. u16 mii_data;
  346. struct au1000_private *aup;
  347. // printk("am79c874_status\n");
  348. if (!dev) {
  349. printk(KERN_ERR "am79c874_status error: NULL dev\n");
  350. return -1;
  351. }
  352. aup = (struct au1000_private *) dev->priv;
  353. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  354. if (mii_data & MII_STAT_LINK) {
  355. *link = 1;
  356. mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
  357. if (mii_data & MII_AMD_PHY_STAT_SPD) {
  358. if (mii_data & MII_AMD_PHY_STAT_FDX) {
  359. *speed = IF_PORT_100BASEFX;
  360. dev->if_port = IF_PORT_100BASEFX;
  361. }
  362. else {
  363. *speed = IF_PORT_100BASETX;
  364. dev->if_port = IF_PORT_100BASETX;
  365. }
  366. }
  367. else {
  368. *speed = IF_PORT_10BASET;
  369. dev->if_port = IF_PORT_10BASET;
  370. }
  371. }
  372. else {
  373. *link = 0;
  374. *speed = 0;
  375. dev->if_port = IF_PORT_UNKNOWN;
  376. }
  377. return 0;
  378. }
  379. int lxt971a_init(struct net_device *dev, int phy_addr)
  380. {
  381. if (au1000_debug > 4)
  382. printk("lxt971a_init\n");
  383. /* restart auto-negotiation */
  384. mdio_write(dev, phy_addr, MII_CONTROL,
  385. MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
  386. /* set up LEDs to correct display */
  387. mdio_write(dev, phy_addr, 20, 0x0422);
  388. if (au1000_debug > 4)
  389. dump_mii(dev, phy_addr);
  390. return 0;
  391. }
  392. int lxt971a_reset(struct net_device *dev, int phy_addr)
  393. {
  394. s16 mii_control, timeout;
  395. if (au1000_debug > 4) {
  396. printk("lxt971a_reset\n");
  397. dump_mii(dev, phy_addr);
  398. }
  399. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  400. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  401. mdelay(1);
  402. for (timeout = 100; timeout > 0; --timeout) {
  403. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  404. if ((mii_control & MII_CNTL_RESET) == 0)
  405. break;
  406. mdelay(1);
  407. }
  408. if (mii_control & MII_CNTL_RESET) {
  409. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  410. return -1;
  411. }
  412. return 0;
  413. }
  414. int
  415. lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  416. {
  417. u16 mii_data;
  418. struct au1000_private *aup;
  419. if (!dev) {
  420. printk(KERN_ERR "lxt971a_status error: NULL dev\n");
  421. return -1;
  422. }
  423. aup = (struct au1000_private *) dev->priv;
  424. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  425. if (mii_data & MII_STAT_LINK) {
  426. *link = 1;
  427. mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
  428. if (mii_data & MII_INTEL_PHY_STAT_SPD) {
  429. if (mii_data & MII_INTEL_PHY_STAT_FDX) {
  430. *speed = IF_PORT_100BASEFX;
  431. dev->if_port = IF_PORT_100BASEFX;
  432. }
  433. else {
  434. *speed = IF_PORT_100BASETX;
  435. dev->if_port = IF_PORT_100BASETX;
  436. }
  437. }
  438. else {
  439. *speed = IF_PORT_10BASET;
  440. dev->if_port = IF_PORT_10BASET;
  441. }
  442. }
  443. else {
  444. *link = 0;
  445. *speed = 0;
  446. dev->if_port = IF_PORT_UNKNOWN;
  447. }
  448. return 0;
  449. }
  450. int ks8995m_init(struct net_device *dev, int phy_addr)
  451. {
  452. s16 data;
  453. // printk("ks8995m_init\n");
  454. /* Stop auto-negotiation */
  455. data = mdio_read(dev, phy_addr, MII_CONTROL);
  456. mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
  457. /* Set advertisement to 10/100 and Half/Full duplex
  458. * (full capabilities) */
  459. data = mdio_read(dev, phy_addr, MII_ANADV);
  460. data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
  461. mdio_write(dev, phy_addr, MII_ANADV, data);
  462. /* Restart auto-negotiation */
  463. data = mdio_read(dev, phy_addr, MII_CONTROL);
  464. data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
  465. mdio_write(dev, phy_addr, MII_CONTROL, data);
  466. if (au1000_debug > 4) dump_mii(dev, phy_addr);
  467. return 0;
  468. }
  469. int ks8995m_reset(struct net_device *dev, int phy_addr)
  470. {
  471. s16 mii_control, timeout;
  472. // printk("ks8995m_reset\n");
  473. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  474. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  475. mdelay(1);
  476. for (timeout = 100; timeout > 0; --timeout) {
  477. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  478. if ((mii_control & MII_CNTL_RESET) == 0)
  479. break;
  480. mdelay(1);
  481. }
  482. if (mii_control & MII_CNTL_RESET) {
  483. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  484. return -1;
  485. }
  486. return 0;
  487. }
  488. int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  489. {
  490. u16 mii_data;
  491. struct au1000_private *aup;
  492. if (!dev) {
  493. printk(KERN_ERR "ks8995m_status error: NULL dev\n");
  494. return -1;
  495. }
  496. aup = (struct au1000_private *) dev->priv;
  497. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  498. if (mii_data & MII_STAT_LINK) {
  499. *link = 1;
  500. mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
  501. if (mii_data & MII_AUX_100) {
  502. if (mii_data & MII_AUX_FDX) {
  503. *speed = IF_PORT_100BASEFX;
  504. dev->if_port = IF_PORT_100BASEFX;
  505. }
  506. else {
  507. *speed = IF_PORT_100BASETX;
  508. dev->if_port = IF_PORT_100BASETX;
  509. }
  510. }
  511. else {
  512. *speed = IF_PORT_10BASET;
  513. dev->if_port = IF_PORT_10BASET;
  514. }
  515. }
  516. else {
  517. *link = 0;
  518. *speed = 0;
  519. dev->if_port = IF_PORT_UNKNOWN;
  520. }
  521. return 0;
  522. }
  523. int
  524. smsc_83C185_init (struct net_device *dev, int phy_addr)
  525. {
  526. s16 data;
  527. if (au1000_debug > 4)
  528. printk("smsc_83C185_init\n");
  529. /* Stop auto-negotiation */
  530. data = mdio_read(dev, phy_addr, MII_CONTROL);
  531. mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
  532. /* Set advertisement to 10/100 and Half/Full duplex
  533. * (full capabilities) */
  534. data = mdio_read(dev, phy_addr, MII_ANADV);
  535. data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
  536. mdio_write(dev, phy_addr, MII_ANADV, data);
  537. /* Restart auto-negotiation */
  538. data = mdio_read(dev, phy_addr, MII_CONTROL);
  539. data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
  540. mdio_write(dev, phy_addr, MII_CONTROL, data);
  541. if (au1000_debug > 4) dump_mii(dev, phy_addr);
  542. return 0;
  543. }
  544. int
  545. smsc_83C185_reset (struct net_device *dev, int phy_addr)
  546. {
  547. s16 mii_control, timeout;
  548. if (au1000_debug > 4)
  549. printk("smsc_83C185_reset\n");
  550. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  551. mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
  552. mdelay(1);
  553. for (timeout = 100; timeout > 0; --timeout) {
  554. mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
  555. if ((mii_control & MII_CNTL_RESET) == 0)
  556. break;
  557. mdelay(1);
  558. }
  559. if (mii_control & MII_CNTL_RESET) {
  560. printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
  561. return -1;
  562. }
  563. return 0;
  564. }
  565. int
  566. smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  567. {
  568. u16 mii_data;
  569. struct au1000_private *aup;
  570. if (!dev) {
  571. printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
  572. return -1;
  573. }
  574. aup = (struct au1000_private *) dev->priv;
  575. mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
  576. if (mii_data & MII_STAT_LINK) {
  577. *link = 1;
  578. mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
  579. if (mii_data & (1<<3)) {
  580. if (mii_data & (1<<4)) {
  581. *speed = IF_PORT_100BASEFX;
  582. dev->if_port = IF_PORT_100BASEFX;
  583. }
  584. else {
  585. *speed = IF_PORT_100BASETX;
  586. dev->if_port = IF_PORT_100BASETX;
  587. }
  588. }
  589. else {
  590. *speed = IF_PORT_10BASET;
  591. dev->if_port = IF_PORT_10BASET;
  592. }
  593. }
  594. else {
  595. *link = 0;
  596. *speed = 0;
  597. dev->if_port = IF_PORT_UNKNOWN;
  598. }
  599. return 0;
  600. }
  601. #ifdef CONFIG_MIPS_BOSPORUS
  602. int stub_init(struct net_device *dev, int phy_addr)
  603. {
  604. //printk("PHY stub_init\n");
  605. return 0;
  606. }
  607. int stub_reset(struct net_device *dev, int phy_addr)
  608. {
  609. //printk("PHY stub_reset\n");
  610. return 0;
  611. }
  612. int
  613. stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
  614. {
  615. //printk("PHY stub_status\n");
  616. *link = 1;
  617. /* hmmm, revisit */
  618. *speed = IF_PORT_100BASEFX;
  619. dev->if_port = IF_PORT_100BASEFX;
  620. return 0;
  621. }
  622. #endif
  623. struct phy_ops bcm_5201_ops = {
  624. bcm_5201_init,
  625. bcm_5201_reset,
  626. bcm_5201_status,
  627. };
  628. struct phy_ops am79c874_ops = {
  629. am79c874_init,
  630. am79c874_reset,
  631. am79c874_status,
  632. };
  633. struct phy_ops am79c901_ops = {
  634. am79c901_init,
  635. am79c901_reset,
  636. am79c901_status,
  637. };
  638. struct phy_ops lsi_80227_ops = {
  639. lsi_80227_init,
  640. lsi_80227_reset,
  641. lsi_80227_status,
  642. };
  643. struct phy_ops lxt971a_ops = {
  644. lxt971a_init,
  645. lxt971a_reset,
  646. lxt971a_status,
  647. };
  648. struct phy_ops ks8995m_ops = {
  649. ks8995m_init,
  650. ks8995m_reset,
  651. ks8995m_status,
  652. };
  653. struct phy_ops smsc_83C185_ops = {
  654. smsc_83C185_init,
  655. smsc_83C185_reset,
  656. smsc_83C185_status,
  657. };
  658. #ifdef CONFIG_MIPS_BOSPORUS
  659. struct phy_ops stub_ops = {
  660. stub_init,
  661. stub_reset,
  662. stub_status,
  663. };
  664. #endif
  665. static struct mii_chip_info {
  666. const char * name;
  667. u16 phy_id0;
  668. u16 phy_id1;
  669. struct phy_ops *phy_ops;
  670. int dual_phy;
  671. } mii_chip_table[] = {
  672. {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
  673. {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
  674. {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
  675. {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
  676. {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
  677. {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
  678. {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
  679. {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
  680. {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
  681. {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
  682. #ifdef CONFIG_MIPS_BOSPORUS
  683. {"Stub", 0x1234, 0x5678, &stub_ops },
  684. #endif
  685. {0,},
  686. };
  687. static int mdio_read(struct net_device *dev, int phy_id, int reg)
  688. {
  689. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  690. volatile u32 *mii_control_reg;
  691. volatile u32 *mii_data_reg;
  692. u32 timedout = 20;
  693. u32 mii_control;
  694. #ifdef CONFIG_BCM5222_DUAL_PHY
  695. /* First time we probe, it's for the mac0 phy.
  696. * Since we haven't determined yet that we have a dual phy,
  697. * aup->mii->mii_control_reg won't be set up and we'll
  698. * default to the else statement.
  699. * By the time we probe for the mac1 phy, the mii_control_reg
  700. * will be set up to be the address of the mac0 phy control since
  701. * both phys are controlled through mac0.
  702. */
  703. if (aup->mii && aup->mii->mii_control_reg) {
  704. mii_control_reg = aup->mii->mii_control_reg;
  705. mii_data_reg = aup->mii->mii_data_reg;
  706. }
  707. else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
  708. /* assume both phys are controlled through mac0 */
  709. mii_control_reg = au_macs[0]->mii->mii_control_reg;
  710. mii_data_reg = au_macs[0]->mii->mii_data_reg;
  711. }
  712. else
  713. #endif
  714. {
  715. /* default control and data reg addresses */
  716. mii_control_reg = &aup->mac->mii_control;
  717. mii_data_reg = &aup->mac->mii_data;
  718. }
  719. while (*mii_control_reg & MAC_MII_BUSY) {
  720. mdelay(1);
  721. if (--timedout == 0) {
  722. printk(KERN_ERR "%s: read_MII busy timeout!!\n",
  723. dev->name);
  724. return -1;
  725. }
  726. }
  727. mii_control = MAC_SET_MII_SELECT_REG(reg) |
  728. MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
  729. *mii_control_reg = mii_control;
  730. timedout = 20;
  731. while (*mii_control_reg & MAC_MII_BUSY) {
  732. mdelay(1);
  733. if (--timedout == 0) {
  734. printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
  735. dev->name);
  736. return -1;
  737. }
  738. }
  739. return (int)*mii_data_reg;
  740. }
  741. static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
  742. {
  743. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  744. volatile u32 *mii_control_reg;
  745. volatile u32 *mii_data_reg;
  746. u32 timedout = 20;
  747. u32 mii_control;
  748. #ifdef CONFIG_BCM5222_DUAL_PHY
  749. if (aup->mii && aup->mii->mii_control_reg) {
  750. mii_control_reg = aup->mii->mii_control_reg;
  751. mii_data_reg = aup->mii->mii_data_reg;
  752. }
  753. else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
  754. /* assume both phys are controlled through mac0 */
  755. mii_control_reg = au_macs[0]->mii->mii_control_reg;
  756. mii_data_reg = au_macs[0]->mii->mii_data_reg;
  757. }
  758. else
  759. #endif
  760. {
  761. /* default control and data reg addresses */
  762. mii_control_reg = &aup->mac->mii_control;
  763. mii_data_reg = &aup->mac->mii_data;
  764. }
  765. while (*mii_control_reg & MAC_MII_BUSY) {
  766. mdelay(1);
  767. if (--timedout == 0) {
  768. printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
  769. dev->name);
  770. return;
  771. }
  772. }
  773. mii_control = MAC_SET_MII_SELECT_REG(reg) |
  774. MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
  775. *mii_data_reg = value;
  776. *mii_control_reg = mii_control;
  777. }
  778. static void dump_mii(struct net_device *dev, int phy_id)
  779. {
  780. int i, val;
  781. for (i = 0; i < 7; i++) {
  782. if ((val = mdio_read(dev, phy_id, i)) >= 0)
  783. printk("%s: MII Reg %d=%x\n", dev->name, i, val);
  784. }
  785. for (i = 16; i < 25; i++) {
  786. if ((val = mdio_read(dev, phy_id, i)) >= 0)
  787. printk("%s: MII Reg %d=%x\n", dev->name, i, val);
  788. }
  789. }
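mdio_read() and mdio_write() above poll MAC_MII_BUSY and then issue a single MII management transaction; mii_probe() below builds on them to identify the attached PHY from its ID registers. A small hedged usage sketch (the function itself is not part of the driver; the register indices come from au1000_eth.h):

/* Illustration only: probe one MII address the same way mii_probe()
 * does, using the mdio_read() helper above. */
static int example_identify_phy(struct net_device *dev, int phy_addr)
{
	u16 status, id0, id1;

	status = mdio_read(dev, phy_addr, MII_STATUS);
	if (status == 0xffff || status == 0x0000)
		return -1;			/* nothing answering here */

	id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
	id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
	printk(KERN_INFO "%s: PHY at %d, id %04x:%04x\n",
	       dev->name, phy_addr, id0, id1);
	return 0;
}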
  790. static int mii_probe (struct net_device * dev)
  791. {
  792. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  793. int phy_addr;
  794. #ifdef CONFIG_MIPS_BOSPORUS
  795. int phy_found=0;
  796. #endif
  797. /* search for total of 32 possible mii phy addresses */
  798. for (phy_addr = 0; phy_addr < 32; phy_addr++) {
  799. u16 mii_status;
  800. u16 phy_id0, phy_id1;
  801. int i;
  802. #ifdef CONFIG_BCM5222_DUAL_PHY
  803. /* Mask the already found phy, try next one */
  804. if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
  805. if (au_macs[0]->phy_addr == phy_addr)
  806. continue;
  807. }
  808. #endif
  809. mii_status = mdio_read(dev, phy_addr, MII_STATUS);
  810. if (mii_status == 0xffff || mii_status == 0x0000)
  811. /* the mii is not accessible, try next one */
  812. continue;
  813. phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
  814. phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
  815. /* search our mii table for the current mii */
  816. for (i = 0; mii_chip_table[i].phy_id1; i++) {
  817. if (phy_id0 == mii_chip_table[i].phy_id0 &&
  818. phy_id1 == mii_chip_table[i].phy_id1) {
  819. struct mii_phy * mii_phy = aup->mii;
  820. printk(KERN_INFO "%s: %s at phy address %d\n",
  821. dev->name, mii_chip_table[i].name,
  822. phy_addr);
  823. #ifdef CONFIG_MIPS_BOSPORUS
  824. phy_found = 1;
  825. #endif
  826. mii_phy->chip_info = mii_chip_table+i;
  827. aup->phy_addr = phy_addr;
  828. aup->want_autoneg = 1;
  829. aup->phy_ops = mii_chip_table[i].phy_ops;
  830. aup->phy_ops->phy_init(dev,phy_addr);
  831. // Check for dual-phy and then store required
  832. // values and set indicators. We need to do
  833. // this now since mdio_{read,write} need the
  834. // control and data register addresses.
  835. #ifdef CONFIG_BCM5222_DUAL_PHY
  836. if ( mii_chip_table[i].dual_phy) {
  837. /* assume both phys are controlled
  838. * through MAC0. Board specific? */
  839. /* sanity check */
  840. if (!au_macs[0] || !au_macs[0]->mii)
  841. return -1;
  842. aup->mii->mii_control_reg = (u32 *)
  843. &au_macs[0]->mac->mii_control;
  844. aup->mii->mii_data_reg = (u32 *)
  845. &au_macs[0]->mac->mii_data;
  846. }
  847. #endif
  848. goto found;
  849. }
  850. }
  851. }
  852. found:
  853. #ifdef CONFIG_MIPS_BOSPORUS
  854. /* This is a workaround for the Micrel/Kendin 5 port switch
  855. The second MAC doesn't see a PHY connected... so we need to
  856. trick it into thinking we have one.
  857. If this kernel is run on another Au1500 development board
  858. the stub will be found as well as the actual PHY. However,
  859. the last found PHY will be used... usually at Addr 31 (Db1500).
  860. */
  861. if ( (!phy_found) )
  862. {
  863. u16 phy_id0, phy_id1;
  864. int i;
  865. phy_id0 = 0x1234;
  866. phy_id1 = 0x5678;
  867. /* search our mii table for the current mii */
  868. for (i = 0; mii_chip_table[i].phy_id1; i++) {
  869. if (phy_id0 == mii_chip_table[i].phy_id0 &&
  870. phy_id1 == mii_chip_table[i].phy_id1) {
  871. struct mii_phy * mii_phy;
  872. printk(KERN_INFO "%s: %s at phy address %d\n",
  873. dev->name, mii_chip_table[i].name,
  874. phy_addr);
  875. mii_phy = kmalloc(sizeof(struct mii_phy),
  876. GFP_KERNEL);
  877. if (mii_phy) {
  878. mii_phy->chip_info = mii_chip_table+i;
  879. aup->phy_addr = phy_addr;
  880. mii_phy->next = aup->mii;
  881. aup->phy_ops =
  882. mii_chip_table[i].phy_ops;
  883. aup->mii = mii_phy;
  884. aup->phy_ops->phy_init(dev,phy_addr);
  885. } else {
  886. printk(KERN_ERR "%s: out of memory\n",
  887. dev->name);
  888. return -1;
  889. }
  890. mii_phy->chip_info = mii_chip_table+i;
  891. aup->phy_addr = phy_addr;
  892. aup->phy_ops = mii_chip_table[i].phy_ops;
  893. aup->phy_ops->phy_init(dev,phy_addr);
  894. break;
  895. }
  896. }
  897. }
  898. if (aup->mac_id == 0) {
  899. /* the Bosporus phy responds to addresses 0-5 but
  900. * 5 is the correct one.
  901. */
  902. aup->phy_addr = 5;
  903. }
  904. #endif
  905. if (aup->mii->chip_info == NULL) {
  906. printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
  907. dev->name);
  908. return -1;
  909. }
  910. printk(KERN_INFO "%s: Using %s as default\n",
  911. dev->name, aup->mii->chip_info->name);
  912. return 0;
  913. }
  914. /*
  915. * Buffer allocation/deallocation routines. The buffer descriptor returned
  916. * has the virtual and dma address of a buffer suitable for
  917. * both, receive and transmit operations.
  918. */
  919. static db_dest_t *GetFreeDB(struct au1000_private *aup)
  920. {
  921. db_dest_t *pDB;
  922. pDB = aup->pDBfree;
  923. if (pDB) {
  924. aup->pDBfree = pDB->pnext;
  925. }
  926. return pDB;
  927. }
  928. void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
  929. {
  930. db_dest_t *pDBfree = aup->pDBfree;
  931. if (pDBfree)
  932. pDBfree->pnext = pDB;
  933. aup->pDBfree = pDB;
  934. }
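GetFreeDB() and ReleaseDB() maintain a simple singly linked free list of buffer descriptors threaded through aup->db. As a hedged sketch of the intended usage pattern (mirroring what au1000_probe() and the rx path do below; the helper itself is not driver code):

/* Illustration only: take a descriptor, point a DMA ring entry at its
 * buffer, give it back once the buffer is retired. */
static int example_use_db(struct au1000_private *aup, int ring_index)
{
	db_dest_t *pDB = GetFreeDB(aup);

	if (!pDB)
		return -1;		/* free list exhausted */

	/* hand the buffer's DMA address to the hardware ring entry */
	aup->rx_dma_ring[ring_index]->buff_stat = (unsigned) pDB->dma_addr;

	/* ... later, when the buffer is no longer needed ... */
	ReleaseDB(aup, pDB);
	return 0;
}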
  935. static void enable_rx_tx(struct net_device *dev)
  936. {
  937. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  938. if (au1000_debug > 4)
  939. printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
  940. aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
  941. au_sync_delay(10);
  942. }
  943. static void hard_stop(struct net_device *dev)
  944. {
  945. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  946. if (au1000_debug > 4)
  947. printk(KERN_INFO "%s: hard stop\n", dev->name);
  948. aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
  949. au_sync_delay(10);
  950. }
  951. static void reset_mac(struct net_device *dev)
  952. {
  953. int i;
  954. u32 flags;
  955. struct au1000_private *aup = (struct au1000_private *) dev->priv;
  956. if (au1000_debug > 4)
  957. printk(KERN_INFO "%s: reset mac, aup %x\n",
  958. dev->name, (unsigned)aup);
  959. spin_lock_irqsave(&aup->lock, flags);
  960. if (aup->timer.function == &au1000_timer) { /* check if timer initialized */
  961. del_timer(&aup->timer);
  962. }
  963. hard_stop(dev);
  964. #ifdef CONFIG_BCM5222_DUAL_PHY
  965. if (aup->mac_id != 0) {
  966. #endif
  967. /* If BCM5222, we can't leave MAC0 in reset because then
  968. * we can't access the dual phy for ETH1 */
  969. *aup->enable = MAC_EN_CLOCK_ENABLE;
  970. au_sync_delay(2);
  971. *aup->enable = 0;
  972. au_sync_delay(2);
  973. #ifdef CONFIG_BCM5222_DUAL_PHY
  974. }
  975. #endif
  976. aup->tx_full = 0;
  977. for (i = 0; i < NUM_RX_DMA; i++) {
  978. /* reset control bits */
  979. aup->rx_dma_ring[i]->buff_stat &= ~0xf;
  980. }
  981. for (i = 0; i < NUM_TX_DMA; i++) {
  982. /* reset control bits */
  983. aup->tx_dma_ring[i]->buff_stat &= ~0xf;
  984. }
  985. spin_unlock_irqrestore(&aup->lock, flags);
  986. }
  987. /*
  988. * Setup the receive and transmit "rings". These pointers are the addresses
  989. * of the rx and tx MAC DMA registers so they are fixed by the hardware --
  990. * these are not descriptors sitting in memory.
  991. */
  992. static void
  993. setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
  994. {
  995. int i;
  996. for (i = 0; i < NUM_RX_DMA; i++) {
  997. aup->rx_dma_ring[i] =
  998. (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
  999. }
  1000. for (i = 0; i < NUM_TX_DMA; i++) {
  1001. aup->tx_dma_ring[i] =
  1002. (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
  1003. }
  1004. }
  1005. static struct {
  1006. int port;
  1007. u32 base_addr;
  1008. u32 macen_addr;
  1009. int irq;
  1010. struct net_device *dev;
  1011. } iflist[2];
  1012. static int num_ifs;
  1013. /*
  1014. * Setup the base address and interrupt of the Au1xxx ethernet macs
  1015. * based on cpu type and whether the interface is enabled in sys_pinfunc
  1016. * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
  1017. */
  1018. static int __init au1000_init_module(void)
  1019. {
  1020. struct cpuinfo_mips *c = &current_cpu_data;
  1021. int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
  1022. struct net_device *dev;
  1023. int i, found_one = 0;
  1024. switch (c->cputype) {
  1025. #ifdef CONFIG_SOC_AU1000
  1026. case CPU_AU1000:
  1027. num_ifs = 2 - ni;
  1028. iflist[0].base_addr = AU1000_ETH0_BASE;
  1029. iflist[1].base_addr = AU1000_ETH1_BASE;
  1030. iflist[0].macen_addr = AU1000_MAC0_ENABLE;
  1031. iflist[1].macen_addr = AU1000_MAC1_ENABLE;
  1032. iflist[0].irq = AU1000_MAC0_DMA_INT;
  1033. iflist[1].irq = AU1000_MAC1_DMA_INT;
  1034. break;
  1035. #endif
  1036. #ifdef CONFIG_SOC_AU1100
  1037. case CPU_AU1100:
  1038. num_ifs = 1 - ni;
  1039. iflist[0].base_addr = AU1100_ETH0_BASE;
  1040. iflist[0].macen_addr = AU1100_MAC0_ENABLE;
  1041. iflist[0].irq = AU1100_MAC0_DMA_INT;
  1042. break;
  1043. #endif
  1044. #ifdef CONFIG_SOC_AU1500
  1045. case CPU_AU1500:
  1046. num_ifs = 2 - ni;
  1047. iflist[0].base_addr = AU1500_ETH0_BASE;
  1048. iflist[1].base_addr = AU1500_ETH1_BASE;
  1049. iflist[0].macen_addr = AU1500_MAC0_ENABLE;
  1050. iflist[1].macen_addr = AU1500_MAC1_ENABLE;
  1051. iflist[0].irq = AU1500_MAC0_DMA_INT;
  1052. iflist[1].irq = AU1500_MAC1_DMA_INT;
  1053. break;
  1054. #endif
  1055. #ifdef CONFIG_SOC_AU1550
  1056. case CPU_AU1550:
  1057. num_ifs = 2 - ni;
  1058. iflist[0].base_addr = AU1550_ETH0_BASE;
  1059. iflist[1].base_addr = AU1550_ETH1_BASE;
  1060. iflist[0].macen_addr = AU1550_MAC0_ENABLE;
  1061. iflist[1].macen_addr = AU1550_MAC1_ENABLE;
  1062. iflist[0].irq = AU1550_MAC0_DMA_INT;
  1063. iflist[1].irq = AU1550_MAC1_DMA_INT;
  1064. break;
  1065. #endif
  1066. default:
  1067. num_ifs = 0;
  1068. }
  1069. for(i = 0; i < num_ifs; i++) {
  1070. dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
  1071. iflist[i].dev = dev;
  1072. if (dev)
  1073. found_one++;
  1074. }
  1075. if (!found_one)
  1076. return -ENODEV;
  1077. return 0;
  1078. }
  1079. static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
  1080. {
  1081. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1082. u16 ctl, adv;
  1083. /* Setup standard advertise */
  1084. adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
  1085. adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
  1086. if (advertise & ADVERTISED_10baseT_Half)
  1087. adv |= ADVERTISE_10HALF;
  1088. if (advertise & ADVERTISED_10baseT_Full)
  1089. adv |= ADVERTISE_10FULL;
  1090. if (advertise & ADVERTISED_100baseT_Half)
  1091. adv |= ADVERTISE_100HALF;
  1092. if (advertise & ADVERTISED_100baseT_Full)
  1093. adv |= ADVERTISE_100FULL;
  1094. mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
  1095. /* Start/Restart aneg */
  1096. ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
  1097. ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
  1098. mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
  1099. return 0;
  1100. }
  1101. static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
  1102. {
  1103. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1104. u16 ctl;
  1105. ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
  1106. ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
  1107. /* First reset the PHY */
  1108. mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
  1109. /* Select speed & duplex */
  1110. switch (speed) {
  1111. case SPEED_10:
  1112. break;
  1113. case SPEED_100:
  1114. ctl |= BMCR_SPEED100;
  1115. break;
  1116. case SPEED_1000:
  1117. default:
  1118. return -EINVAL;
  1119. }
  1120. if (fd == DUPLEX_FULL)
  1121. ctl |= BMCR_FULLDPLX;
  1122. mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
  1123. return 0;
  1124. }
  1125. static void
  1126. au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
  1127. {
  1128. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1129. u32 advertise;
  1130. int autoneg;
  1131. int forced_speed;
  1132. int forced_duplex;
  1133. /* Default advertise */
  1134. advertise = GENMII_DEFAULT_ADVERTISE;
  1135. autoneg = aup->want_autoneg;
  1136. forced_speed = SPEED_100;
  1137. forced_duplex = DUPLEX_FULL;
  1138. /* Setup link parameters */
  1139. if (cmd) {
  1140. if (cmd->autoneg == AUTONEG_ENABLE) {
  1141. advertise = cmd->advertising;
  1142. autoneg = 1;
  1143. } else {
  1144. autoneg = 0;
  1145. forced_speed = cmd->speed;
  1146. forced_duplex = cmd->duplex;
  1147. }
  1148. }
  1149. /* Configure PHY & start aneg */
  1150. aup->want_autoneg = autoneg;
  1151. if (autoneg)
  1152. au1000_setup_aneg(dev, advertise);
  1153. else
  1154. au1000_setup_forced(dev, forced_speed, forced_duplex);
  1155. mod_timer(&aup->timer, jiffies + HZ);
  1156. }
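au1000_start_link() chooses between autonegotiation and a forced mode based on the (optional) ethtool command block. A hedged sketch of driving the forced path directly, with the same locking au1000_set_settings() uses; the helper below is illustrative and not part of the driver:

/* Illustration only: force 100 Mbit full duplex through the same path
 * the ethtool set_settings hook takes. */
static void example_force_100_full(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct ethtool_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.autoneg = AUTONEG_DISABLE;
	cmd.speed   = SPEED_100;
	cmd.duplex  = DUPLEX_FULL;

	spin_lock_irq(&aup->lock);
	au1000_start_link(dev, &cmd);
	spin_unlock_irq(&aup->lock);
}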
  1157. static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1158. {
  1159. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1160. u16 link, speed;
  1161. cmd->supported = GENMII_DEFAULT_FEATURES;
  1162. cmd->advertising = GENMII_DEFAULT_ADVERTISE;
  1163. cmd->port = PORT_MII;
  1164. cmd->transceiver = XCVR_EXTERNAL;
  1165. cmd->phy_address = aup->phy_addr;
  1166. spin_lock_irq(&aup->lock);
  1167. cmd->autoneg = aup->want_autoneg;
  1168. aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
  1169. if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
  1170. cmd->speed = SPEED_100;
  1171. else if (speed == IF_PORT_10BASET)
  1172. cmd->speed = SPEED_10;
  1173. if (link && (dev->if_port == IF_PORT_100BASEFX))
  1174. cmd->duplex = DUPLEX_FULL;
  1175. else
  1176. cmd->duplex = DUPLEX_HALF;
  1177. spin_unlock_irq(&aup->lock);
  1178. return 0;
  1179. }
  1180. static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  1181. {
  1182. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1183. unsigned long features = GENMII_DEFAULT_FEATURES;
  1184. if (!capable(CAP_NET_ADMIN))
  1185. return -EPERM;
  1186. if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
  1187. return -EINVAL;
  1188. if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
  1189. return -EINVAL;
  1190. if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
  1191. return -EINVAL;
  1192. if (cmd->autoneg == AUTONEG_DISABLE)
  1193. switch (cmd->speed) {
  1194. case SPEED_10:
  1195. if (cmd->duplex == DUPLEX_HALF &&
  1196. (features & SUPPORTED_10baseT_Half) == 0)
  1197. return -EINVAL;
  1198. if (cmd->duplex == DUPLEX_FULL &&
  1199. (features & SUPPORTED_10baseT_Full) == 0)
  1200. return -EINVAL;
  1201. break;
  1202. case SPEED_100:
  1203. if (cmd->duplex == DUPLEX_HALF &&
  1204. (features & SUPPORTED_100baseT_Half) == 0)
  1205. return -EINVAL;
  1206. if (cmd->duplex == DUPLEX_FULL &&
  1207. (features & SUPPORTED_100baseT_Full) == 0)
  1208. return -EINVAL;
  1209. break;
  1210. default:
  1211. return -EINVAL;
  1212. }
  1213. else if ((features & SUPPORTED_Autoneg) == 0)
  1214. return -EINVAL;
  1215. spin_lock_irq(&aup->lock);
  1216. au1000_start_link(dev, cmd);
  1217. spin_unlock_irq(&aup->lock);
  1218. return 0;
  1219. }
  1220. static int au1000_nway_reset(struct net_device *dev)
  1221. {
  1222. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1223. if (!aup->want_autoneg)
  1224. return -EINVAL;
  1225. spin_lock_irq(&aup->lock);
  1226. au1000_start_link(dev, NULL);
  1227. spin_unlock_irq(&aup->lock);
  1228. return 0;
  1229. }
  1230. static void
  1231. au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1232. {
  1233. struct au1000_private *aup = (struct au1000_private *)dev->priv;
  1234. strcpy(info->driver, DRV_NAME);
  1235. strcpy(info->version, DRV_VERSION);
  1236. info->fw_version[0] = '\0';
  1237. sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
  1238. info->regdump_len = 0;
  1239. }
  1240. static u32 au1000_get_link(struct net_device *dev)
  1241. {
  1242. return netif_carrier_ok(dev);
  1243. }
  1244. static struct ethtool_ops au1000_ethtool_ops = {
  1245. .get_settings = au1000_get_settings,
  1246. .set_settings = au1000_set_settings,
  1247. .get_drvinfo = au1000_get_drvinfo,
  1248. .nway_reset = au1000_nway_reset,
  1249. .get_link = au1000_get_link
  1250. };
  1251. static struct net_device *
  1252. au1000_probe(u32 ioaddr, int irq, int port_num)
  1253. {
  1254. static unsigned version_printed = 0;
  1255. struct au1000_private *aup = NULL;
  1256. struct net_device *dev = NULL;
  1257. db_dest_t *pDB, *pDBfree;
  1258. char *pmac, *argptr;
  1259. char ethaddr[6];
  1260. int i, err;
  1261. if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
  1262. return NULL;
  1263. if (version_printed++ == 0)
  1264. printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
  1265. dev = alloc_etherdev(sizeof(struct au1000_private));
  1266. if (!dev) {
  1267. printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
  1268. return NULL;
  1269. }
  1270. if ((err = register_netdev(dev))) {
  1271. printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
  1272. err);
  1273. free_netdev(dev);
  1274. return NULL;
  1275. }
  1276. printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
  1277. dev->name, ioaddr, irq);
  1278. aup = dev->priv;
  1279. /* Allocate the data buffers */
  1280. /* Snooping works fine with eth on all au1xxx */
  1281. aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
  1282. MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
  1283. &aup->dma_addr,
  1284. 0);
  1285. if (!aup->vaddr) {
  1286. free_netdev(dev);
  1287. release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
  1288. return NULL;
  1289. }
  1290. /* aup->mac is the base address of the MAC's registers */
  1291. aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
  1292. /* Setup some variables for quick register address access */
  1293. if (ioaddr == iflist[0].base_addr)
  1294. {
  1295. /* check env variables first */
  1296. if (!get_ethernet_addr(ethaddr)) {
  1297. memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
  1298. } else {
  1299. /* Check command line */
  1300. argptr = prom_getcmdline();
  1301. if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
  1302. printk(KERN_INFO "%s: No mac address found\n",
  1303. dev->name);
  1304. /* use the hard coded mac addresses */
  1305. } else {
  1306. str2eaddr(ethaddr, pmac + strlen("ethaddr="));
  1307. memcpy(au1000_mac_addr, ethaddr,
  1308. sizeof(au1000_mac_addr));
  1309. }
  1310. }
  1311. aup->enable = (volatile u32 *)
  1312. ((unsigned long)iflist[0].macen_addr);
  1313. memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
  1314. setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
  1315. aup->mac_id = 0;
  1316. au_macs[0] = aup;
  1317. }
  1318. else
  1319. if (ioaddr == iflist[1].base_addr)
  1320. {
  1321. aup->enable = (volatile u32 *)
  1322. ((unsigned long)iflist[1].macen_addr);
  1323. memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
  1324. dev->dev_addr[4] += 0x10;
  1325. setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
  1326. aup->mac_id = 1;
  1327. au_macs[1] = aup;
  1328. }
  1329. else
  1330. {
  1331. printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
  1332. }
  1333. /* bring the device out of reset, otherwise probing the mii
  1334. * will hang */
  1335. *aup->enable = MAC_EN_CLOCK_ENABLE;
  1336. au_sync_delay(2);
  1337. *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
  1338. MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
  1339. au_sync_delay(2);
  1340. aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
  1341. if (!aup->mii) {
  1342. printk(KERN_ERR "%s: out of memory\n", dev->name);
  1343. goto err_out;
  1344. }
  1345. aup->mii->next = NULL;
  1346. aup->mii->chip_info = NULL;
  1347. aup->mii->status = 0;
  1348. aup->mii->mii_control_reg = 0;
  1349. aup->mii->mii_data_reg = 0;
  1350. if (mii_probe(dev) != 0) {
  1351. goto err_out;
  1352. }
  1353. pDBfree = NULL;
  1354. /* setup the data buffer descriptors and attach a buffer to each one */
  1355. pDB = aup->db;
  1356. for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
  1357. pDB->pnext = pDBfree;
  1358. pDBfree = pDB;
  1359. pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
  1360. pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
  1361. pDB++;
  1362. }
  1363. aup->pDBfree = pDBfree;
	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	spin_lock_init(&aup->lock);
	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->get_stats = au1000_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
	dev->set_config = &au1000_set_config;
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code uses the ethernet controller, so reset it to start
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);

	return dev;

err_out:
	/* here we should have a valid dev plus aup-> register addresses
	 * so we can reset the mac properly. */
	reset_mac(dev);
	kfree(aup->mii);
	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
	dma_free_noncoherent(NULL,
			MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
			(void *)aup->vaddr,
			aup->dma_addr);
	unregister_netdev(dev);
	free_netdev(dev);
	release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
	return NULL;
}

/*
 * Initialize the interface.
 *
 * When the device powers up, the clocks are disabled and the
 * mac is in reset state.  When the interface is closed, we
 * do the same -- reset the device and disable the clocks to
 * conserve power.  Thus, whenever au1000_init() is called,
 * the device should already be in reset state.
 */
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	unsigned long flags;
	int i;
	u32 control;
	u16 link, speed;

	if (au1000_debug > 4)
		printk("%s: au1000_init\n", dev->name);

	spin_lock_irqsave(&aup->lock, flags);

	/* bring the device out of reset */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
	au_sync_delay(20);

	aup->mac->control = 0;
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
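
	/*
	 * The (buff_stat & 0xC) >> 2 reads above resynchronize the software
	 * head/tail indices with the hardware: bits 3:2 of the first
	 * descriptor's buff_stat word appear to hold the index of the
	 * descriptor the DMA engine will service next.
	 */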
	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0];
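
	/*
	 * The mac_addr_high/low registers take the station address packed
	 * least-significant byte first; e.g. for 00:11:22:33:44:55 the
	 * writes above yield mac_addr_high = 0x5544 and
	 * mac_addr_low = 0x33221100.
	 */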
	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
	}
	au_sync();

	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
	control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (link && (dev->if_port == IF_PORT_100BASEFX)) {
		control |= MAC_FULL_DUPLEX;
	}
	aup->mac->control = control;
	aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
	au_sync();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}

static void au1000_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct au1000_private *aup;
	unsigned char if_port;
	u16 link, speed;

	if (!dev) {
		/* fatal error, don't restart the timer */
		printk(KERN_ERR "au1000_timer error: NULL dev\n");
		return;
	}
	/* only dereference dev once we know it is valid */
	aup = (struct au1000_private *) dev->priv;

	if_port = dev->if_port;
	if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
		if (link) {
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				printk(KERN_INFO "%s: link up\n", dev->name);
			}
		}
		else {
			if (netif_carrier_ok(dev)) {
				netif_carrier_off(dev);
				dev->if_port = 0;
				printk(KERN_INFO "%s: link down\n", dev->name);
			}
		}
	}

	if (link && (dev->if_port != if_port) &&
			(dev->if_port != IF_PORT_UNKNOWN)) {
		hard_stop(dev);
		if (dev->if_port == IF_PORT_100BASEFX) {
			printk(KERN_INFO "%s: going to full duplex\n",
					dev->name);
			aup->mac->control |= MAC_FULL_DUPLEX;
			au_sync_delay(1);
		}
		else {
			aup->mac->control &= ~MAC_FULL_DUPLEX;
			au_sync_delay(1);
		}
		enable_rx_tx(dev);
	}

	aup->timer.expires = RUN_AT((1*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);
}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: open: dev=%p\n", dev->name, dev);

	/* grab the interrupt first, so the error path of au1000_init()
	 * below actually has an IRQ to release */
	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
					dev->name, dev))) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, dev->irq);
		return retval;
	}

	if ((retval = au1000_init(dev))) {
		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
		free_irq(dev->irq, dev);
		return retval;
	}
	netif_start_queue(dev);

	init_timer(&aup->timer); /* used in ioctl() */
	aup->timer.expires = RUN_AT((3*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);

	if (au1000_debug > 4)
		printk("%s: open: Initialization done.\n", dev->name);

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: close: dev=%p\n", dev->name, dev);

	reset_mac(dev);

	spin_lock_irqsave(&aup->lock, flags);
	/* stop the device */
	netif_stop_queue(dev);
	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

static void __exit au1000_cleanup_module(void)
{
	int i, j;
	struct net_device *dev;
	struct au1000_private *aup;

	for (i = 0; i < num_ifs; i++) {
		dev = iflist[i].dev;
		if (dev) {
			aup = (struct au1000_private *) dev->priv;
			unregister_netdev(dev);
			kfree(aup->mii);
			for (j = 0; j < NUM_RX_DMA; j++) {
				if (aup->rx_db_inuse[j])
					ReleaseDB(aup, aup->rx_db_inuse[j]);
			}
			for (j = 0; j < NUM_TX_DMA; j++) {
				if (aup->tx_db_inuse[j])
					ReleaseDB(aup, aup->tx_db_inuse[j]);
			}
			dma_free_noncoherent(NULL,
					MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
					(void *)aup->vaddr,
					aup->dma_addr);
			free_netdev(dev);
			release_mem_region(CPHYSADDR(iflist[i].base_addr),
					MAC_IOSIZE);
		}
	}
}

static void update_tx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	if (status & TX_FRAME_ABORTED) {
		if (dev->if_port == IF_PORT_100BASEFX) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* any other tx errors are only valid
				 * in half duplex mode */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		}
		else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}

/*
 * Called from the interrupt service routine to acknowledge
 * the TX DONE bits.  This is a must if the irq is setup as
 * edge triggered.
 */
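/*
 * With an edge-triggered interrupt a TX completion that arrives while this
 * handler is running will not re-assert the line, so the loop below reaps
 * every descriptor that has TX_T_DONE set in one pass rather than
 * acknowledging a single descriptor per interrupt.
 */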
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile tx_dma_t *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		au_sync();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}

/*
 * Au1000 transmit routine.
 */
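/*
 * Note that this is a copying transmit path: the skb data is copied into
 * the pre-allocated DMA buffer attached to the descriptor, short frames
 * are zero-padded up to ETH_ZLEN, and the skb itself is freed immediately
 * since the hardware only ever sees the static buffer.
 */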
static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 5)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
				dev->name, (unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	}
	else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	memcpy((void *)pDB->vaddr, skb->data, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++) {
			((char *)pDB->vaddr)[i] = 0;
		}
		ptxd->len = ETH_ZLEN;
	}
	else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return 0;
}

static inline void update_rx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		/* runts and over-length frames both count as length errors */
		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	}
	else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

/*
 * Au1000 receive routine.
 */
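/*
 * Receive also works out of fixed, pre-mapped buffers: each good frame is
 * copied into a freshly allocated skb and the descriptor is immediately
 * recycled by writing back its buffer address with RX_DMA_ENABLE set.  The
 * ring index wraps with "& (NUM_RX_DMA - 1)", which assumes the ring size
 * is a power of two.
 */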
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32 frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb == NULL) {
				/* drop the frame but still recycle the
				 * descriptor below, otherwise we would spin
				 * on it forever */
				printk(KERN_ERR
					"%s: Memory squeeze, dropping packet.\n",
					dev->name);
				aup->stats.rx_dropped++;
			}
			else {
				skb->dev = dev;
				skb_reserve(skb, 2); /* 16 byte IP header align */
				eth_copy_and_sum(skb,
					(unsigned char *)pDB->vaddr, frmlen, 0);
				skb_put(skb, frmlen);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb); /* pass the packet to upper layers */
			}
		}
		else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}

/*
 * Au1000 interrupt service routine.
 */
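/*
 * Each MAC registers this handler against its own IRQ (see au1000_open()),
 * so dev_id identifies the interface to service.  RX is drained before the
 * TX acknowledge to minimize the chance of receive overruns.
 */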
static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;

	if (dev == NULL) {
		/* don't dereference a NULL dev in the error message */
		printk(KERN_ERR "au1000_interrupt: null dev ptr\n");
		return IRQ_RETVAL(1);
	}

	/* Handle RX interrupts first to minimize chance of overrun */
	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}

/*
 * The Tx ring has been full longer than the watchdog timeout
 * value -- assume the transmitter is hung, and reset it.
 */
static void au1000_tx_timeout(struct net_device *dev)
{
	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
	reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
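
/*
 * Bit-serial CRC-32 using the standard Ethernet polynomial 0x04C11DB7,
 * applied here to 6-byte Ethernet addresses.  set_rx_mode() below uses the
 * top six bits of the result (crc >> 26) as the multicast hash bin index.
 */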
static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
	int crc = -1;

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				 ethernet_polynomial : 0);
	}
	return crc;
}

static void set_rx_mode(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		aup->mac->control |= MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->flags & IFF_ALLMULTI) ||
			dev->mc_count > MULTICAST_FILTER_LIMIT) {
		aup->mac->control |= MAC_PASS_ALL_MULTI;
		aup->mac->control &= ~MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
	} else {
		int i;
		struct dev_mc_list *mclist;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
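
		/*
		 * Each multicast address is hashed with ether_crc(); the top
		 * six bits of the CRC (crc >> 26) select one of the 64 bits
		 * spread across multi_hash_high/multi_hash_low.  Like any
		 * hash filter this can pass some unwanted multicast frames,
		 * which the upper layers then discard.
		 */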
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
				i++, mclist = mclist->next) {
			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
					(long *)mc_filter);
		}
		aup->mac->multi_hash_high = mc_filter[1];
		aup->mac->multi_hash_low = mc_filter[0];
		aup->mac->control &= ~MAC_PROMISCUOUS;
		aup->mac->control |= MAC_HASH_MODE;
	}
}
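
/*
 * The u16 overlay on ifr_ifru below follows the usual MII ioctl convention
 * (cf. struct mii_ioctl_data): data[0] is the PHY address, data[1] the
 * register number, data[2] the value to write and data[3] the value read
 * back.
 */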
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 *data = (u16 *)&rq->ifr_ifru;

	switch (cmd) {
	case SIOCDEVPRIVATE:	/* Get the address of the PHY in use. */
	case SIOCGMIIPHY:
		if (!netif_running(dev))
			return -EINVAL;
		data[0] = aup->phy_addr;
		/* fall through: also return the current register contents */
	case SIOCDEVPRIVATE+1:	/* Read the specified MII register. */
	case SIOCGMIIREG:
		data[3] = mdio_read(dev, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE+2:	/* Write the specified MII register */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(dev, data[0], data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
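
/*
 * Media selection via SIOCSIFMAP: every supported case below rewrites the
 * PHY's MII_CONTROL register -- either enabling and restarting
 * autonegotiation (IF_PORT_UNKNOWN), or disabling it and forcing the speed
 * and duplex bits -- and drops the carrier so that the periodic
 * au1000_timer() picks up the new link state.
 */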
static int au1000_set_config(struct net_device *dev, struct ifmap *map)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u16 control;

	if (au1000_debug > 4) {
		printk("%s: set_config called: dev->if_port %d map->port %x\n",
				dev->name, dev->if_port, map->port);
	}

	switch (map->port) {
	case IF_PORT_UNKNOWN:		/* use auto here */
		printk(KERN_INFO "%s: config phy for aneg\n", dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* read current control */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_FDX | MII_CNTL_F100);

		/* enable auto negotiation and reset the negotiation */
		mdio_write(dev, aup->phy_addr, MII_CONTROL,
				control | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
		break;

	case IF_PORT_10BASET:		/* 10BaseT */
		printk(KERN_INFO "%s: config phy for 10BaseT\n", dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 10Mbps, Half Duplex */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_FDX);

		/* disable auto negotiation and force 10M/HD mode */
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASET:		/* 100BaseT */
	case IF_PORT_100BASETX:		/* 100BaseTx */
		printk(KERN_INFO "%s: config phy for 100BaseTX\n", dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Half Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
		control |= MII_CNTL_F100;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASEFX:		/* 100BaseFx */
		printk(KERN_INFO "%s: config phy for 100BaseFX\n", dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Full Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~MII_CNTL_AUTO;
		control |= MII_CNTL_F100 | MII_CNTL_FDX;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_10BASE2:		/* 10Base2 */
	case IF_PORT_AUI:		/* AUI */
		/* These modes are not supported */
		printk(KERN_ERR "%s: 10Base2/AUI not supported\n", dev->name);
		return -EOPNOTSUPP;

	default:
		printk(KERN_ERR "%s: Invalid media selected\n", dev->name);
		return -EINVAL;
	}
	return 0;
}

static struct net_device_stats *au1000_get_stats(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);

	if (netif_device_present(dev)) {
		return &aup->stats;
	}
	return NULL;
}

module_init(au1000_init_module);
module_exit(au1000_cleanup_module);