/*
 * smc911x.c
 * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
 *
 * Copyright (C) 2005 Sensoria Corp
 * Derived from the unified SMC91x driver by Nicolas Pitre
 * and the smsc911x.c reference driver by SMSC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Arguments:
 *	watchdog   = TX watchdog timeout
 *	tx_fifo_kb = Size of TX FIFO in KB
 *
 * History:
 *	04/16/05	Dustin McIntire		Initial version
 */
static const char version[] =
	"smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";

/* Debugging options */
#define ENABLE_SMC_DEBUG_RX	0
#define ENABLE_SMC_DEBUG_TX	0
#define ENABLE_SMC_DEBUG_DMA	0
#define ENABLE_SMC_DEBUG_PKTS	0
#define ENABLE_SMC_DEBUG_MISC	0
#define ENABLE_SMC_DEBUG_FUNC	0

#define SMC_DEBUG_RX	((ENABLE_SMC_DEBUG_RX	? 1 : 0) << 0)
#define SMC_DEBUG_TX	((ENABLE_SMC_DEBUG_TX	? 1 : 0) << 1)
#define SMC_DEBUG_DMA	((ENABLE_SMC_DEBUG_DMA	? 1 : 0) << 2)
#define SMC_DEBUG_PKTS	((ENABLE_SMC_DEBUG_PKTS	? 1 : 0) << 3)
#define SMC_DEBUG_MISC	((ENABLE_SMC_DEBUG_MISC	? 1 : 0) << 4)
#define SMC_DEBUG_FUNC	((ENABLE_SMC_DEBUG_FUNC	? 1 : 0) << 5)

#ifndef SMC_DEBUG
#define SMC_DEBUG	(SMC_DEBUG_RX	| \
			 SMC_DEBUG_TX	| \
			 SMC_DEBUG_DMA	| \
			 SMC_DEBUG_PKTS	| \
			 SMC_DEBUG_MISC	| \
			 SMC_DEBUG_FUNC	\
			)
#endif

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/io.h>

#include "smc911x.h"

/*
 * Transmit timeout, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

static int tx_fifo_kb = 8;
module_param(tx_fifo_kb, int, 0400);
MODULE_PARM_DESC(tx_fifo_kb, "transmit FIFO size in KB (1<x<15)(default=8)");

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc911x");

/*
 * The internal workings of the driver. If you are changing anything
 * here with the SMC stuff, you should have the datasheet and know
 * what you are doing.
 */
#define CARDNAME "smc911x"

/*
 * Use power-down feature of the chip
 */
#define POWER_DOWN	1

#if SMC_DEBUG > 0
#define DBG(n, dev, args...)			\
	do {					\
		if (SMC_DEBUG & (n))		\
			netdev_dbg(dev, args);	\
	} while (0)

#define PRINTK(dev, args...)	netdev_info(dev, args)
#else
#define DBG(n, dev, args...)	do { } while (0)
#define PRINTK(dev, args...)	netdev_dbg(dev, args)
#endif

#if SMC_DEBUG_PKTS > 0
static void PRINT_PKT(u_char *buf, int length)
{
	int i;
	int remainder;
	int lines;

	lines = length / 16;
	remainder = length % 16;

	for (i = 0; i < lines ; i ++) {
		int cur;
		printk(KERN_DEBUG);
		for (cur = 0; cur < 8; cur++) {
			u_char a, b;
			a = *buf++;
			b = *buf++;
			pr_cont("%02x%02x ", a, b);
		}
		pr_cont("\n");
	}
	printk(KERN_DEBUG);
	for (i = 0; i < remainder/2 ; i++) {
		u_char a, b;
		a = *buf++;
		b = *buf++;
		pr_cont("%02x%02x ", a, b);
	}
	pr_cont("\n");
}
#else
#define PRINT_PKT(x...)	do { } while (0)
#endif

/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do {		\
	unsigned int __mask;			\
	__mask = SMC_GET_INT_EN((lp));		\
	__mask |= (x);				\
	SMC_SET_INT_EN((lp), __mask);		\
} while (0)

/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do {		\
	unsigned int __mask;			\
	__mask = SMC_GET_INT_EN((lp));		\
	__mask &= ~(x);				\
	SMC_SET_INT_EN((lp), __mask);		\
} while (0)
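
/*
 * Both helpers above are non-atomic read-modify-write sequences on the
 * interrupt enable register, so callers must not race with one another;
 * most call sites in this driver hold lp->lock around them.
 */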
/*
 * this does a soft reset on the device
 */
static void smc911x_reset(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int reg, timeout=0, resets=1, irq_cfg;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* Take out of PM setting first */
	if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
		/* Write to the bytetest will take out of powerdown */
		SMC_SET_BYTE_TEST(lp, 0);
		timeout=10;
		do {
			udelay(10);
			reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
		} while (--timeout && !reg);
		if (timeout == 0) {
			PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
			return;
		}
	}

	/* Disable all interrupts */
	spin_lock_irqsave(&lp->lock, flags);
	SMC_SET_INT_EN(lp, 0);
	spin_unlock_irqrestore(&lp->lock, flags);

	while (resets--) {
		SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
		timeout=10;
		do {
			udelay(10);
			reg = SMC_GET_HW_CFG(lp);
			/* If chip indicates reset timeout then try again */
			if (reg & HW_CFG_SRST_TO_) {
				PRINTK(dev, "chip reset timeout, retrying...\n");
				resets++;
				break;
			}
		} while (--timeout && (reg & HW_CFG_SRST_));
	}
	if (timeout == 0) {
		PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
		return;
	}

	/* make sure EEPROM has finished loading before setting GPIO_CFG */
	timeout=1000;
	while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
		udelay(10);

	if (timeout == 0){
		PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
		return;
	}

	/* Initialize interrupts */
	SMC_SET_INT_EN(lp, 0);
	SMC_ACK_INT(lp, -1);

	/* Reset the FIFO level and flow control settings */
	SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
//TODO: Figure out what appropriate pause time is
	SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
	SMC_SET_AFC_CFG(lp, lp->afc_cfg);

	/* Set to LED outputs */
	SMC_SET_GPIO_CFG(lp, 0x70070000);

	/*
	 * Deassert IRQ for 1*10us for edge type interrupts
	 * and drive IRQ pin push-pull
	 */
	irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	if (lp->cfg.irq_polarity)
		irq_cfg |= INT_CFG_IRQ_POL_;
#endif
	SMC_SET_IRQ_CFG(lp, irq_cfg);

	/* clear anything saved */
	if (lp->pending_tx_skb != NULL) {
		dev_kfree_skb (lp->pending_tx_skb);
		lp->pending_tx_skb = NULL;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
}

/*
 * Enable Interrupts, Receive, and Transmit
 */
static void smc911x_enable(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned mask, cfg, cr;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);

	SMC_SET_MAC_ADDR(lp, dev->dev_addr);

	/* Enable TX */
	cfg = SMC_GET_HW_CFG(lp);
	cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
	cfg |= HW_CFG_SF_;
	SMC_SET_HW_CFG(lp, cfg);
	SMC_SET_FIFO_TDA(lp, 0xFF);
	/* Update TX stats on every 64 packets received or every 1 sec */
	SMC_SET_FIFO_TSL(lp, 64);
	SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
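	/* 10000 is the GPT load value; assuming the usual 100us GPT tick on
	 * these parts, that gives the ~1 second refresh mentioned above.
	 */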
	SMC_GET_MAC_CR(lp, cr);
	cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
	SMC_SET_MAC_CR(lp, cr);
	SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);

	/* Add 2 byte padding to start of packets */
	SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
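	/* The 2-byte offset puts the 14-byte Ethernet header at offset 2, so
	 * the IP header that follows starts on a 32-bit boundary.
	 */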
	/* Turn on receiver and enable RX */
	if (cr & MAC_CR_RXEN_)
		DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");

	SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);

	/* Interrupt on every received packet */
	SMC_SET_FIFO_RSA(lp, 0x01);
	SMC_SET_FIFO_RSL(lp, 0x00);

	/* now, enable interrupts */
	mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
		INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
		INT_EN_PHY_INT_EN_;
	if (IS_REV_A(lp->revision))
		mask|=INT_EN_RDFL_EN_;
	else {
		mask|=INT_EN_RDFO_EN_;
	}
	SMC_ENABLE_INT(lp, mask);

	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * this puts the device in an inactive state
 */
static void smc911x_shutdown(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned cr;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);

	/* Disable IRQ's */
	SMC_SET_INT_EN(lp, 0);

	/* Turn off Rx and Tx */
	spin_lock_irqsave(&lp->lock, flags);
	SMC_GET_MAC_CR(lp, cr);
	cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
	SMC_SET_MAC_CR(lp, cr);
	SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
	spin_unlock_irqrestore(&lp->lock, flags);
}

static inline void smc911x_drop_pkt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int fifo_count, timeout, reg;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
	    CARDNAME, __func__);
	fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
	if (fifo_count <= 4) {
		/* Manually dump the packet data */
		while (fifo_count--)
			SMC_GET_RX_FIFO(lp);
	} else {
		/* Fast forward through the bad packet */
		SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
		timeout=50;
		do {
			udelay(10);
			reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
		} while (--timeout && reg);
		if (timeout == 0) {
			PRINTK(dev, "timeout waiting for RX fast forward\n");
		}
	}
}

/*
 * This is the procedure to handle the receipt of a packet.
 * It should be called after checking for packet presence in
 * the RX status FIFO. It must be called with the spin lock
 * already held.
 */
static inline void smc911x_rcv(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int pkt_len, status;
	struct sk_buff *skb;
	unsigned char *data;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
	    __func__);
	status = SMC_GET_RX_STS_FIFO(lp);
	DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
	    (status & 0x3fff0000) >> 16, status & 0xc000ffff);
	pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
	if (status & RX_STS_ES_) {
		/* Deal with a bad packet */
		dev->stats.rx_errors++;
		if (status & RX_STS_CRC_ERR_)
			dev->stats.rx_crc_errors++;
		else {
			if (status & RX_STS_LEN_ERR_)
				dev->stats.rx_length_errors++;
			if (status & RX_STS_MCAST_)
				dev->stats.multicast++;
		}
		/* Remove the bad packet data from the RX FIFO */
		smc911x_drop_pkt(dev);
	} else {
		/* Receive a valid packet */
		/* Alloc a buffer with extra room for DMA alignment */
		skb = netdev_alloc_skb(dev, pkt_len+32);
		if (unlikely(skb == NULL)) {
			PRINTK(dev, "Low memory, rcvd packet dropped.\n");
			dev->stats.rx_dropped++;
			smc911x_drop_pkt(dev);
			return;
		}
		/* Align IP header to 32 bits
		 * Note that the device is configured to add a 2
		 * byte padding to the packet start, so we really
		 * want to write to the original data pointer */
		data = skb->data;
		skb_reserve(skb, 2);
		skb_put(skb,pkt_len-4);
#ifdef SMC_USE_DMA
		{
		unsigned int fifo;
		/* Lower the FIFO threshold if possible */
		fifo = SMC_GET_FIFO_INT(lp);
		if (fifo & 0xFF) fifo--;
		DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
		    fifo & 0xff);
		SMC_SET_FIFO_INT(lp, fifo);
		/* Setup RX DMA */
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
		lp->rxdma_active = 1;
		lp->current_rx_skb = skb;
		SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
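		/* Pull pkt_len plus the 2-byte RX offset, rounded up to a
		 * 16-byte multiple to match the RX_END_ALGN16 setting above.
		 */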
		/* Packet processing deferred to DMA RX interrupt */
		}
#else
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
		SMC_PULL_DATA(lp, data, pkt_len+2+3);

		DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len-4;
#endif
	}
}

/*
 * This is called to actually send a packet to the chip.
 */
static void smc911x_hardware_send_pkt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int cmdA, cmdB, len;
	unsigned char *buf;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
	BUG_ON(lp->pending_tx_skb == NULL);

	skb = lp->pending_tx_skb;
	lp->pending_tx_skb = NULL;

	/* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
	/* cmdB [31:16] pkt tag [10:0] length */
#ifdef SMC_USE_DMA
	/* 16 byte buffer alignment mode */
	buf = (char*)((u32)(skb->data) & ~0xF);
	len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
	cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
		TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
		skb->len;
#else
	buf = (char*)((u32)skb->data & ~0x3);
	len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
	cmdA = (((u32)skb->data & 0x3) << 16) |
		TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
		skb->len;
#endif
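	/* Example for the PIO path: a 60-byte skb whose data starts at an
	 * address ending in ...2 gives a start offset of 2 in cmdA and
	 * len = (60 + 3 + 2) & ~0x3 = 64 bytes pushed from the aligned buf.
	 */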
	/* tag is packet length so we can use this in stats update later */
	cmdB = (skb->len << 16) | (skb->len & 0x7FF);

	DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
	    len, len, buf, cmdA, cmdB);
	SMC_SET_TX_FIFO(lp, cmdA);
	SMC_SET_TX_FIFO(lp, cmdB);

	DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
	PRINT_PKT(buf, len <= 64 ? len : 64);

	/* Send pkt via PIO or DMA */
#ifdef SMC_USE_DMA
	lp->current_tx_skb = skb;
	SMC_PUSH_DATA(lp, buf, len);
	/* DMA complete IRQ will free buffer and set jiffies */
#else
	SMC_PUSH_DATA(lp, buf, len);
	dev->trans_start = jiffies;
	dev_kfree_skb_irq(skb);
#endif
	if (!lp->tx_throttle) {
		netif_wake_queue(dev);
	}
	SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
}

/*
 * Since I am not sure if I will have enough room in the chip's ram
 * to store the packet, I call this routine which either sends it
 * now, or sets the card to generate an interrupt when ready
 * for the packet.
 */
static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int free;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
	    __func__);

	spin_lock_irqsave(&lp->lock, flags);

	BUG_ON(lp->pending_tx_skb != NULL);

	free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
	DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);

	/* Turn off the flow when running out of space in FIFO */
	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
		DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
		    free);
		/* Reenable when at least 1 packet of size MTU present */
		SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
		lp->tx_throttle = 1;
		netif_stop_queue(dev);
	}

	/* Drop packets when we run out of space in TX FIFO
	 * Account for overhead required for:
	 *
	 *	Tx command words	 8 bytes
	 *	Start offset		15 bytes
	 *	End padding		15 bytes
	 */
	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
		netdev_warn(dev, "No Tx free space %d < %d\n",
			    free, skb->len);
		lp->pending_tx_skb = NULL;
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		spin_unlock_irqrestore(&lp->lock, flags);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

#ifdef SMC_USE_DMA
	{
		/* If the DMA is already running then defer this packet Tx until
		 * the DMA IRQ starts it
		 */
		if (lp->txdma_active) {
			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
			lp->pending_tx_skb = skb;
			netif_stop_queue(dev);
			spin_unlock_irqrestore(&lp->lock, flags);
			return NETDEV_TX_OK;
		} else {
			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
			lp->txdma_active = 1;
		}
	}
#endif
	lp->pending_tx_skb = skb;
	smc911x_hardware_send_pkt(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * This handles a TX status interrupt, which is only called when:
 * - a TX error occurred, or
 * - TX of a packet completed.
 */
static void smc911x_tx(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int tx_status;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
	    __func__);

	/* Collect the TX status */
	while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
		DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
		    (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
		tx_status = SMC_GET_TX_STS_FIFO(lp);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes+=tx_status>>16;
		DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
		    (tx_status & 0xffff0000) >> 16,
		    tx_status & 0x0000ffff);
		/* count Tx errors, but ignore lost carrier errors when in
		 * full-duplex mode */
		if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
		    !(tx_status & 0x00000306))) {
			dev->stats.tx_errors++;
		}
		if (tx_status & TX_STS_MANY_COLL_) {
			dev->stats.collisions+=16;
			dev->stats.tx_aborted_errors++;
		} else {
			dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
		}
		/* carrier error only has meaning for half-duplex communication */
		if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
		    !lp->ctl_rfduplx) {
			dev->stats.tx_carrier_errors++;
		}
		if (tx_status & TX_STS_LATE_COLL_) {
			dev->stats.collisions++;
			dev->stats.tx_aborted_errors++;
		}
	}
}
/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
/*
 * Reads a register from the MII Management serial interface
 */
static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int phydata;

	SMC_GET_MII(lp, phyreg, phyaddr, phydata);

	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
	    __func__, phyaddr, phyreg, phydata);
	return phydata;
}

/*
 * Writes a register to the MII Management serial interface
 */
static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
			      int phydata)
{
	struct smc911x_local *lp = netdev_priv(dev);

	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
	    __func__, phyaddr, phyreg, phydata);

	SMC_SET_MII(lp, phyreg, phyaddr, phydata);
}

/*
 * Finds and reports the PHY address (LAN9115 and LAN9117 have an external
 * PHY interface; LAN9118 has internal only)
 */
static void smc911x_phy_detect(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr;
	unsigned int cfg, id1, id2;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	lp->phy_type = 0;

	/*
	 * Scan all 32 PHY addresses if necessary, starting at
	 * PHY#1 to PHY#31, and then PHY#0 last.
	 */
	switch(lp->version) {
	case CHIP_9115:
	case CHIP_9117:
	case CHIP_9215:
	case CHIP_9217:
		cfg = SMC_GET_HW_CFG(lp);
		if (cfg & HW_CFG_EXT_PHY_DET_) {
			cfg &= ~HW_CFG_PHY_CLK_SEL_;
			cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
			SMC_SET_HW_CFG(lp, cfg);
			udelay(10); /* Wait for clocks to stop */

			cfg |= HW_CFG_EXT_PHY_EN_;
			SMC_SET_HW_CFG(lp, cfg);
			udelay(10); /* Wait for clocks to stop */

			cfg &= ~HW_CFG_PHY_CLK_SEL_;
			cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
			SMC_SET_HW_CFG(lp, cfg);
			udelay(10); /* Wait for clocks to stop */

			cfg |= HW_CFG_SMI_SEL_;
			SMC_SET_HW_CFG(lp, cfg);

			for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
				/* Read the PHY identifiers */
				SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
				SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);

				/* Make sure it is a valid identifier */
				if (id1 != 0x0000 && id1 != 0xffff &&
				    id1 != 0x8000 && id2 != 0x0000 &&
				    id2 != 0xffff && id2 != 0x8000) {
					/* Save the PHY's address */
					lp->mii.phy_id = phyaddr & 31;
					lp->phy_type = id1 << 16 | id2;
					break;
				}
			}
			if (phyaddr < 32)
				/* Found an external PHY */
				break;
		}
	default:
		/* Internal media only */
		SMC_GET_PHY_ID1(lp, 1, id1);
		SMC_GET_PHY_ID2(lp, 1, id2);
		/* Save the PHY's address */
		lp->mii.phy_id = 1;
		lp->phy_type = id1 << 16 | id2;
	}

	DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
	    id1, id2, lp->mii.phy_id);
}

/*
 * Sets the PHY to a configuration as determined by the user.
 * Called with spin_lock held.
 */
static int smc911x_phy_fixed(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int bmcr;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* Enter Link Disable state */
	SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
	bmcr |= BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	/*
	 * Set our fixed capabilities
	 * Disable auto-negotiation
	 */
	bmcr &= ~BMCR_ANENABLE;
	if (lp->ctl_rfduplx)
		bmcr |= BMCR_FULLDPLX;

	if (lp->ctl_rspeed == 100)
		bmcr |= BMCR_SPEED100;

	/* Write our capabilities to the phy control register */
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	/* Re-Configure the Receive/Phy Control register */
	bmcr &= ~BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);

	return 1;
}
/**
 * smc911x_phy_reset - reset the phy
 * @dev: net device
 * @phy: phy address
 *
 * Issue a software reset for the specified PHY and
 * wait up to 100ms for the reset to complete. We should
 * not access the PHY for 50ms after issuing the reset.
 *
 * The time to wait appears to be dependent on the PHY.
 *
 */
static int smc911x_phy_reset(struct net_device *dev, int phy)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int timeout;
	unsigned long flags;
	unsigned int reg;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	reg = SMC_GET_PMT_CTRL(lp);
	reg &= ~0xfffff030;
	reg |= PMT_CTRL_PHY_RST_;
	SMC_SET_PMT_CTRL(lp, reg);
	spin_unlock_irqrestore(&lp->lock, flags);
	for (timeout = 2; timeout; timeout--) {
		msleep(50);
		spin_lock_irqsave(&lp->lock, flags);
		reg = SMC_GET_PMT_CTRL(lp);
		spin_unlock_irqrestore(&lp->lock, flags);
		if (!(reg & PMT_CTRL_PHY_RST_)) {
			/* extra delay required because the phy may
			 * not be completed with its reset
			 * when PHY_BCR_RESET_ is cleared. 256us
			 * should suffice, but use 500us to be safe
			 */
			udelay(500);
			break;
		}
	}

	return reg & PMT_CTRL_PHY_RST_;
}

/**
 * smc911x_phy_powerdown - powerdown phy
 * @dev: net device
 * @phy: phy address
 *
 * Power down the specified PHY
 */
static void smc911x_phy_powerdown(struct net_device *dev, int phy)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int bmcr;

	/* Enter Link Disable state */
	SMC_GET_PHY_BMCR(lp, phy, bmcr);
	bmcr |= BMCR_PDOWN;
	SMC_SET_PHY_BMCR(lp, phy, bmcr);
}

/**
 * smc911x_phy_check_media - check the media status and adjust BMCR
 * @dev: net device
 * @init: set true for initialisation
 *
 * Select duplex mode depending on negotiation state. This
 * also updates our carrier state.
 */
static void smc911x_phy_check_media(struct net_device *dev, int init)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	unsigned int bmcr, cr;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
		/* duplex state has changed */
		SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_GET_MAC_CR(lp, cr);
		if (lp->mii.full_duplex) {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
			bmcr |= BMCR_FULLDPLX;
			cr |= MAC_CR_RCVOWN_;
		} else {
			DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
			bmcr &= ~BMCR_FULLDPLX;
			cr &= ~MAC_CR_RCVOWN_;
		}
		SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
		SMC_SET_MAC_CR(lp, cr);
	}
}

/*
 * Configures the specified PHY through the MII management interface
 * using Autonegotiation.
 * Calls smc911x_phy_fixed() if the user has requested a certain config.
 * If RPC ANEG bit is set, the media selection is dependent purely on
 * the selection by the MII (either in the MII BMCR reg or the result
 * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
 * is controlled by the RPC SPEED and RPC DPLX bits.
 */
static void smc911x_phy_configure(struct work_struct *work)
{
	struct smc911x_local *lp = container_of(work, struct smc911x_local,
						phy_configure);
	struct net_device *dev = lp->netdev;
	int phyaddr = lp->mii.phy_id;
	int my_phy_caps; /* My PHY capabilities */
	int my_ad_caps; /* My Advertised capabilities */
	int status;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);

	/*
	 * We should not be called if phy_type is zero.
	 */
	if (lp->phy_type == 0)
		return;

	if (smc911x_phy_reset(dev, phyaddr)) {
		netdev_info(dev, "PHY reset timed out\n");
		return;
	}
	spin_lock_irqsave(&lp->lock, flags);

	/*
	 * Enable PHY Interrupts (for register 18)
	 * Interrupts listed here are enabled
	 */
	SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
			     PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
			     PHY_INT_MASK_LINK_DOWN_);

	/* If the user requested no auto neg, then go set his request */
	if (lp->mii.force_media) {
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
	SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);

	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
		netdev_info(dev, "Auto negotiation NOT supported\n");
		smc911x_phy_fixed(dev);
		goto smc911x_phy_configure_exit;
	}

	/* CSMA capable w/ both pauses */
	my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	if (my_phy_caps & BMSR_100BASE4)
		my_ad_caps |= ADVERTISE_100BASE4;
	if (my_phy_caps & BMSR_100FULL)
		my_ad_caps |= ADVERTISE_100FULL;
	if (my_phy_caps & BMSR_100HALF)
		my_ad_caps |= ADVERTISE_100HALF;
	if (my_phy_caps & BMSR_10FULL)
		my_ad_caps |= ADVERTISE_10FULL;
	if (my_phy_caps & BMSR_10HALF)
		my_ad_caps |= ADVERTISE_10HALF;

	/* Disable capabilities not selected by our user */
	if (lp->ctl_rspeed != 100)
		my_ad_caps &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);

	if (!lp->ctl_rfduplx)
		my_ad_caps &= ~(ADVERTISE_100FULL | ADVERTISE_10FULL);

	/* Update our Auto-Neg Advertisement Register */
	SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
	lp->mii.advertising = my_ad_caps;

	/*
	 * Read the register back. Without this, it appears that when
	 * auto-negotiation is restarted, sometimes it isn't ready and
	 * the link does not come up.
	 */
	udelay(10);
	SMC_GET_PHY_MII_ADV(lp, phyaddr, status);

	DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
	DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);

	smc911x_phy_check_media(dev, 1);

smc911x_phy_configure_exit:
	spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * smc911x_phy_interrupt
 *
 * Purpose: Handle interrupts relating to PHY register 18. This is
 * called from the "hard" interrupt handler under our private spinlock.
 */
static void smc911x_phy_interrupt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int phyaddr = lp->mii.phy_id;
	int status;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	if (lp->phy_type == 0)
		return;

	smc911x_phy_check_media(dev, 0);
	/* read to clear status bits */
	SMC_GET_PHY_INT_SRC(lp, phyaddr, status);
	DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
	    status & 0xffff);
	DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
	    SMC_GET_AFC_CFG(lp));
}
/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
/*
 * This is the main routine of the driver, to handle the device when
 * it needs some attention.
 */
static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int status, mask, timeout;
	unsigned int rx_overrun=0, cr, pkts;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);

	/* Spurious interrupt check */
	if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
	    (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
		spin_unlock_irqrestore(&lp->lock, flags);
		return IRQ_NONE;
	}

	mask = SMC_GET_INT_EN(lp);
	SMC_SET_INT_EN(lp, 0);

	/* set a timeout value, so I don't stay here forever */
	timeout = 8;

	do {
		status = SMC_GET_INT(lp);

		DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
		    status, mask, status & ~mask);

		status &= mask;
		if (!status)
			break;

		/* Handle SW interrupt condition */
		if (status & INT_STS_SW_INT_) {
			SMC_ACK_INT(lp, INT_STS_SW_INT_);
			mask &= ~INT_EN_SW_INT_EN_;
		}
		/* Handle various error conditions */
		if (status & INT_STS_RXE_) {
			SMC_ACK_INT(lp, INT_STS_RXE_);
			dev->stats.rx_errors++;
		}
		if (status & INT_STS_RXDFH_INT_) {
			SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
			dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
		}
		/* Undocumented interrupt-what is the right thing to do here? */
		if (status & INT_STS_RXDF_INT_) {
			SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
		}

		/* Rx Data FIFO exceeds set level */
		if (status & INT_STS_RDFL_) {
			if (IS_REV_A(lp->revision)) {
				rx_overrun=1;
				SMC_GET_MAC_CR(lp, cr);
				cr &= ~MAC_CR_RXEN_;
				SMC_SET_MAC_CR(lp, cr);
				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
			}
			SMC_ACK_INT(lp, INT_STS_RDFL_);
		}
		if (status & INT_STS_RDFO_) {
			if (!IS_REV_A(lp->revision)) {
				SMC_GET_MAC_CR(lp, cr);
				cr &= ~MAC_CR_RXEN_;
				SMC_SET_MAC_CR(lp, cr);
				rx_overrun=1;
				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
			}
			SMC_ACK_INT(lp, INT_STS_RDFO_);
		}
		/* Handle receive condition */
		if ((status & INT_STS_RSFL_) || rx_overrun) {
			unsigned int fifo;
			DBG(SMC_DEBUG_RX, dev, "RX irq\n");
			fifo = SMC_GET_RX_FIFO_INF(lp);
			pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
			DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
			    pkts, fifo & 0xFFFF);
			if (pkts != 0) {
#ifdef SMC_USE_DMA
				unsigned int fifo;
				if (lp->rxdma_active) {
					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
					    "RX DMA active\n");
					/* The DMA is already running so up the IRQ threshold */
					fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
					fifo |= pkts & 0xFF;
					DBG(SMC_DEBUG_RX, dev,
					    "Setting RX stat FIFO threshold to %d\n",
					    fifo & 0xff);
					SMC_SET_FIFO_INT(lp, fifo);
				} else
#endif
				smc911x_rcv(dev);
			}
			SMC_ACK_INT(lp, INT_STS_RSFL_);
		}
		/* Handle transmit FIFO available */
		if (status & INT_STS_TDFA_) {
			DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
			SMC_SET_FIFO_TDA(lp, 0xFF);
			lp->tx_throttle = 0;
#ifdef SMC_USE_DMA
			if (!lp->txdma_active)
#endif
				netif_wake_queue(dev);
			SMC_ACK_INT(lp, INT_STS_TDFA_);
		}
		/* Handle transmit done condition */
#if 1
		if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
			    "Tx stat FIFO limit (%d) /GPT irq\n",
			    (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
			smc911x_tx(dev);
			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
			SMC_ACK_INT(lp, INT_STS_TSFL_);
			SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
		}
#else
		if (status & INT_STS_TSFL_) {
			DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
			smc911x_tx(dev);
			SMC_ACK_INT(lp, INT_STS_TSFL_);
		}

		if (status & INT_STS_GPT_INT_) {
			DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
			    SMC_GET_IRQ_CFG(lp),
			    SMC_GET_FIFO_INT(lp),
			    SMC_GET_RX_CFG(lp));
			DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
			    (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
			    SMC_GET_RX_FIFO_INF(lp) & 0xffff,
			    SMC_GET_RX_STS_FIFO_PEEK(lp));
			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
			SMC_ACK_INT(lp, INT_STS_GPT_INT_);
		}
#endif

		/* Handle PHY interrupt condition */
		if (status & INT_STS_PHY_INT_) {
			DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
			smc911x_phy_interrupt(dev);
			SMC_ACK_INT(lp, INT_STS_PHY_INT_);
		}
	} while (--timeout);

	/* restore mask state */
	SMC_SET_INT_EN(lp, mask);

	DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
	    8-timeout);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}
#ifdef SMC_USE_DMA
static void
smc911x_tx_dma_irq(int dma, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb = lp->current_tx_skb;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
	/* Clear the DMA interrupt sources */
	SMC_DMA_ACK_IRQ(dev, dma);
	BUG_ON(skb == NULL);
	dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
	dev->trans_start = jiffies;
	dev_kfree_skb_irq(skb);
	lp->current_tx_skb = NULL;
	if (lp->pending_tx_skb != NULL)
		smc911x_hardware_send_pkt(dev);
	else {
		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
		    "No pending Tx packets. DMA disabled\n");
		spin_lock_irqsave(&lp->lock, flags);
		lp->txdma_active = 0;
		if (!lp->tx_throttle) {
			netif_wake_queue(dev);
		}
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
	    "TX DMA irq completed\n");
}

static void
smc911x_rx_dma_irq(int dma, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	unsigned long ioaddr = dev->base_addr;
	struct smc911x_local *lp = netdev_priv(dev);
	struct sk_buff *skb = lp->current_rx_skb;
	unsigned long flags;
	unsigned int pkts;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
	/* Clear the DMA interrupt sources */
	SMC_DMA_ACK_IRQ(dev, dma);
	dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
	BUG_ON(skb == NULL);
	lp->current_rx_skb = NULL;
	PRINT_PKT(skb->data, skb->len);
	skb->protocol = eth_type_trans(skb, dev);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);

	spin_lock_irqsave(&lp->lock, flags);
	pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
	if (pkts != 0) {
		smc911x_rcv(dev);
	} else {
		lp->rxdma_active = 0;
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
	    "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
	    pkts);
}
#endif /* SMC_USE_DMA */

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void smc911x_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	smc911x_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* Our watchdog timed out. Called by the networking layer */
static void smc911x_timeout(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int status, mask;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	status = SMC_GET_INT(lp);
	mask = SMC_GET_INT_EN(lp);
	spin_unlock_irqrestore(&lp->lock, flags);
	DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
	    status, mask);

	/* Dump the current TX FIFO contents and restart */
	mask = SMC_GET_TX_CFG(lp);
	SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
	/*
	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
	 * which calls schedule(). Hence we use a work queue.
	 */
	if (lp->phy_type != 0)
		schedule_work(&lp->phy_configure);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void smc911x_set_multicast_list(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int multicast_table[2];
	unsigned int mcr, update_multicast = 0;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);
	SMC_GET_MAC_CR(lp, mcr);
	spin_unlock_irqrestore(&lp->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
		mcr |= MAC_CR_PRMS_;
	}
	/*
	 * Here, I am setting this to accept all multicast packets.
	 * I don't need to zero the multicast table, because the flag is
	 * checked before the table is
	 */
	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
		DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
		mcr |= MAC_CR_MCPAS_;
	}
	/*
	 * This sets the internal hardware table to filter out unwanted
	 * multicast packets before they take up memory.
	 *
	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
	 * address are the offset into the table. If that bit is 1, then the
	 * multicast packet is accepted. Otherwise, it's dropped silently.
	 *
	 * To use the 6 bits as an offset into the table, the high 1 bit is
	 * the number of the 32 bit register, while the low 5 bits are the bit
	 * within that register.
	 */
	else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Set the Hash perfect mode */
		mcr |= MAC_CR_HPFILT_;

		/* start with a table of all zeros: reject all */
		memset(multicast_table, 0, sizeof(multicast_table));

		netdev_for_each_mc_addr(ha, dev) {
			u32 position;

			/* upper 6 bits are used as hash index */
			position = ether_crc(ETH_ALEN, ha->addr)>>26;

			multicast_table[position>>5] |= 1 << (position&0x1f);
		}
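		/* For example, a CRC whose top 6 bits equal 35 (100011b)
		 * selects multicast_table[35 >> 5] = multicast_table[1]
		 * (HASHH) and sets bit 35 & 0x1f = 3 in that word.
		 */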
  1184. /* be sure I get rid of flags I might have set */
  1185. mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
  1186. /* now, the table can be loaded into the chipset */
  1187. update_multicast = 1;
  1188. } else {
  1189. DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
  1190. mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
  1191. /*
  1192. * since I'm disabling all multicast entirely, I need to
  1193. * clear the multicast list
  1194. */
  1195. memset(multicast_table, 0, sizeof(multicast_table));
  1196. update_multicast = 1;
  1197. }
  1198. spin_lock_irqsave(&lp->lock, flags);
  1199. SMC_SET_MAC_CR(lp, mcr);
  1200. if (update_multicast) {
  1201. DBG(SMC_DEBUG_MISC, dev,
  1202. "update mcast hash table 0x%08x 0x%08x\n",
  1203. multicast_table[0], multicast_table[1]);
  1204. SMC_SET_HASHL(lp, multicast_table[0]);
  1205. SMC_SET_HASHH(lp, multicast_table[1]);
  1206. }
  1207. spin_unlock_irqrestore(&lp->lock, flags);
  1208. }
  1209. /*
  1210. * Open and Initialize the board
  1211. *
  1212. * Set up everything, reset the card, etc..
  1213. */
  1214. static int
  1215. smc911x_open(struct net_device *dev)
  1216. {
  1217. struct smc911x_local *lp = netdev_priv(dev);
  1218. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1219. /* reset the hardware */
  1220. smc911x_reset(dev);
  1221. /* Configure the PHY, initialize the link state */
  1222. smc911x_phy_configure(&lp->phy_configure);
  1223. /* Turn on Tx + Rx */
  1224. smc911x_enable(dev);
  1225. netif_start_queue(dev);
  1226. return 0;
  1227. }
  1228. /*
  1229. * smc911x_close
  1230. *
  1231. * this makes the board clean up everything that it can
  1232. * and not talk to the outside world. Caused by
  1233. * an 'ifconfig ethX down'
  1234. */
  1235. static int smc911x_close(struct net_device *dev)
  1236. {
  1237. struct smc911x_local *lp = netdev_priv(dev);
  1238. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1239. netif_stop_queue(dev);
  1240. netif_carrier_off(dev);
  1241. /* clear everything */
  1242. smc911x_shutdown(dev);
  1243. if (lp->phy_type != 0) {
  1244. /* We need to ensure that no calls to
  1245. * smc911x_phy_configure are pending.
  1246. */
  1247. cancel_work_sync(&lp->phy_configure);
  1248. smc911x_phy_powerdown(dev, lp->mii.phy_id);
  1249. }
  1250. if (lp->pending_tx_skb) {
  1251. dev_kfree_skb(lp->pending_tx_skb);
  1252. lp->pending_tx_skb = NULL;
  1253. }
  1254. return 0;
  1255. }
  1256. /*
  1257. * Ethtool support
  1258. */
  1259. static int
  1260. smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
  1261. {
  1262. struct smc911x_local *lp = netdev_priv(dev);
  1263. int ret, status;
  1264. unsigned long flags;
  1265. DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
  1266. cmd->maxtxpkt = 1;
  1267. cmd->maxrxpkt = 1;
  1268. if (lp->phy_type != 0) {
  1269. spin_lock_irqsave(&lp->lock, flags);
  1270. ret = mii_ethtool_gset(&lp->mii, cmd);
  1271. spin_unlock_irqrestore(&lp->lock, flags);
  1272. } else {
  1273. cmd->supported = SUPPORTED_10baseT_Half |
  1274. SUPPORTED_10baseT_Full |
  1275. SUPPORTED_TP | SUPPORTED_AUI;
  1276. if (lp->ctl_rspeed == 10)
  1277. ethtool_cmd_speed_set(cmd, SPEED_10);
  1278. else if (lp->ctl_rspeed == 100)
  1279. ethtool_cmd_speed_set(cmd, SPEED_100);
  1280. cmd->autoneg = AUTONEG_DISABLE;
  1281. if (lp->mii.phy_id==1)
  1282. cmd->transceiver = XCVR_INTERNAL;
  1283. else
  1284. cmd->transceiver = XCVR_EXTERNAL;
  1285. cmd->port = 0;
  1286. SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
  1287. cmd->duplex =
  1288. (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
  1289. DUPLEX_FULL : DUPLEX_HALF;
  1290. ret = 0;
  1291. }
  1292. return ret;
  1293. }
  1294. static int
  1295. smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
  1296. {
  1297. struct smc911x_local *lp = netdev_priv(dev);
  1298. int ret;
  1299. unsigned long flags;
  1300. if (lp->phy_type != 0) {
  1301. spin_lock_irqsave(&lp->lock, flags);
  1302. ret = mii_ethtool_sset(&lp->mii, cmd);
  1303. spin_unlock_irqrestore(&lp->lock, flags);
  1304. } else {
  1305. if (cmd->autoneg != AUTONEG_DISABLE ||
  1306. cmd->speed != SPEED_10 ||
  1307. (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
  1308. (cmd->port != PORT_TP && cmd->port != PORT_AUI))
  1309. return -EINVAL;
  1310. lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
  1311. ret = 0;
  1312. }
  1313. return ret;
  1314. }
  1315. static void
  1316. smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  1317. {
  1318. strlcpy(info->driver, CARDNAME, sizeof(info->driver));
  1319. strlcpy(info->version, version, sizeof(info->version));
  1320. strlcpy(info->bus_info, dev_name(dev->dev.parent),
  1321. sizeof(info->bus_info));
  1322. }
  1323. static int smc911x_ethtool_nwayreset(struct net_device *dev)
  1324. {
  1325. struct smc911x_local *lp = netdev_priv(dev);
  1326. int ret = -EINVAL;
  1327. unsigned long flags;
  1328. if (lp->phy_type != 0) {
  1329. spin_lock_irqsave(&lp->lock, flags);
  1330. ret = mii_nway_restart(&lp->mii);
  1331. spin_unlock_irqrestore(&lp->lock, flags);
  1332. }
  1333. return ret;
  1334. }
static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	return lp->msg_enable;
}

static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	struct smc911x_local *lp = netdev_priv(dev);
	lp->msg_enable = level;
}
static int smc911x_ethtool_getregslen(struct net_device *dev)
{
	/* System regs + MAC regs + PHY regs */
	return (((E2P_CMD - ID_REV) / 4 + 1) +
		(WUCSR - MAC_CR) + 1 + 32) * sizeof(u32);
}
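/*
 * Note: the length computed above must match the dump layout produced by
 * smc911x_ethtool_getregs() below -- the memory-mapped system CSRs from
 * ID_REV through E2P_CMD, then the MAC CSRs from MAC_CR through WUCSR
 * (read indirectly via SMC_GET_MAC_CSR), then the 32 MII/PHY registers.
 */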
static void smc911x_ethtool_getregs(struct net_device *dev,
				    struct ethtool_regs *regs, void *buf)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned long flags;
	u32 reg, i, j = 0;
	u32 *data = (u32 *)buf;

	regs->version = lp->version;
	for (i = ID_REV; i <= E2P_CMD; i += 4) {
		data[j++] = SMC_inl(lp, i);
	}
	for (i = MAC_CR; i <= WUCSR; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MAC_CSR(lp, i, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg;
	}
	for (i = 0; i <= 31; i++) {
		spin_lock_irqsave(&lp->lock, flags);
		SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
		spin_unlock_irqrestore(&lp->lock, flags);
		data[j++] = reg & 0xFFFF;
	}
}
static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int timeout;
	int e2p_cmd;

	e2p_cmd = SMC_GET_E2P_CMD(lp);
	for (timeout = 10; (e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
			PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
			       __func__);
			return -EFAULT;
		}
		mdelay(1);
		e2p_cmd = SMC_GET_E2P_CMD(lp);
	}
	if (timeout == 0) {
		PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
		       __func__);
		return -ETIMEDOUT;
	}
	return 0;
}
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
						   int cmd, int addr)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
			((cmd) & (0x7 << 28)) |
			((addr) & 0xFF));
	return 0;
}

static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
						   u8 *data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	*data = SMC_GET_E2P_DATA(lp);
	return 0;
}

static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
						    u8 data)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int ret;

	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev)) != 0)
		return ret;
	SMC_SET_E2P_DATA(lp, data);
	return 0;
}
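/*
 * The helpers above give the basic E2P access sequence used by the ethtool
 * EEPROM handlers below: wait for E2P_CMD_EPC_BUSY_ to clear, write a
 * command plus byte address into E2P_CMD, and move the byte through
 * E2P_DATA.  Reads issue one READ command per byte; writes first enable
 * writes (EWEN), then erase and program each byte in turn.
 */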
static int smc911x_ethtool_geteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	u8 eebuf[SMC911X_EEPROM_LEN];
	int i, ret;

	for (i = 0; i < SMC911X_EEPROM_LEN; i++) {
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i])) != 0)
			return ret;
	}
	memcpy(data, eebuf + eeprom->offset, eeprom->len);
	return 0;
}

static int smc911x_ethtool_seteeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	int i, ret;

	/* Enable erase */
	if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0)) != 0)
		return ret;
	for (i = eeprom->offset; i < (eeprom->offset + eeprom->len); i++) {
		/* erase byte */
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i)) != 0)
			return ret;
		/* write byte */
		if ((ret = smc911x_ethtool_write_eeprom_byte(dev, *data)) != 0)
			return ret;
		if ((ret = smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i)) != 0)
			return ret;
	}
	return 0;
}

static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
{
	return SMC911X_EEPROM_LEN;
}
static const struct ethtool_ops smc911x_ethtool_ops = {
	.get_settings	= smc911x_ethtool_getsettings,
	.set_settings	= smc911x_ethtool_setsettings,
	.get_drvinfo	= smc911x_ethtool_getdrvinfo,
	.get_msglevel	= smc911x_ethtool_getmsglevel,
	.set_msglevel	= smc911x_ethtool_setmsglevel,
	.nway_reset	= smc911x_ethtool_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_regs_len	= smc911x_ethtool_getregslen,
	.get_regs	= smc911x_ethtool_getregs,
	.get_eeprom_len	= smc911x_ethtool_geteeprom_len,
	.get_eeprom	= smc911x_ethtool_geteeprom,
	.set_eeprom	= smc911x_ethtool_seteeprom,
};
/*
 * smc911x_findirq
 *
 * This routine has a simple purpose -- make the SMC chip generate an
 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
 */
static int smc911x_findirq(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int timeout = 20;
	unsigned long cookie;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	cookie = probe_irq_on();

	/*
	 * Force a SW interrupt
	 */
	SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);

	/*
	 * Wait until positive that the interrupt has been generated
	 */
	do {
		int int_status;
		udelay(10);
		int_status = SMC_GET_INT_EN(lp);
		if (int_status & INT_EN_SW_INT_EN_)
			break;	/* got the interrupt */
	} while (--timeout);

	/*
	 * there is really nothing that I can do here if timeout fails,
	 * as autoirq_report will return a 0 anyway, which is what I
	 * want in this case. Plus, the clean up is needed in both
	 * cases.
	 */

	/* and disable all interrupts again */
	SMC_SET_INT_EN(lp, 0);

	/* and return what I found */
	return probe_irq_off(cookie);
}
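/*
 * Note on the autodetect mechanism used above: probe_irq_on() arms the
 * kernel's IRQ autoprobe logic and returns a cookie, the forced software
 * interrupt is then expected to fire, and probe_irq_off(cookie) reports
 * which interrupt line was observed (0 if none, negative if several fired).
 */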
static const struct net_device_ops smc911x_netdev_ops = {
	.ndo_open		= smc911x_open,
	.ndo_stop		= smc911x_close,
	.ndo_start_xmit		= smc911x_hard_start_xmit,
	.ndo_tx_timeout		= smc911x_timeout,
	.ndo_set_rx_mode	= smc911x_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smc911x_poll_controller,
#endif
};
/*
 * Function: smc911x_probe(unsigned long ioaddr)
 *
 * Purpose:
 *	Tests to see if a given ioaddr points to an SMC911x chip.
 *	Returns a 0 on success
 *
 * Algorithm:
 *	(1) see if the endian word is OK
 *	(2) see if I recognize the chip ID in the appropriate register
 *
 * Here I do typical initialization tasks.
 *
 * o Initialize the structure if needed
 * o print out my vanity message if not done so already
 * o print out what type of hardware is detected
 * o print out the ethernet address
 * o find the IRQ
 * o set up my private data
 * o configure the dev structure with my subroutines
 * o actually GRAB the irq.
 * o GRAB the region
 */
static int smc911x_probe(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	int i, retval;
	unsigned int val, chip_id, revision;
	const char *version_string;
	unsigned long irq_flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/* First, see if the endian word is recognized */
	val = SMC_GET_BYTE_TEST(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
	    CARDNAME, val);
	if (val != 0x87654321) {
		netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
		retval = -ENODEV;
		goto err_out;
	}

	/*
	 * check if the revision register is something that I
	 * recognize. These might need to be added to later,
	 * as future revisions could be added.
	 */
	chip_id = SMC_GET_PN(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
	    CARDNAME, chip_id);
	for (i = 0; chip_ids[i].id != 0; i++) {
		if (chip_ids[i].id == chip_id)
			break;
	}
	if (!chip_ids[i].id) {
		netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
		retval = -ENODEV;
		goto err_out;
	}
	version_string = chip_ids[i].name;

	revision = SMC_GET_REV(lp);
	DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);

	/* At this point I'll assume that the chip is an SMC911x. */
	DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
	    CARDNAME, chip_ids[i].name);

	/* Validate the TX FIFO size requested */
	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
		netdev_err(dev, "Invalid TX FIFO size requested %d\n",
			   tx_fifo_kb);
		retval = -EINVAL;
		goto err_out;
	}

	/* fill in some of the fields */
	lp->version = chip_ids[i].id;
	lp->revision = revision;
	lp->tx_fifo_kb = tx_fifo_kb;
	/* Reverse calculate the RX FIFO size from the TX */
	lp->tx_fifo_size = (lp->tx_fifo_kb << 10) - 512;
	lp->rx_fifo_size = ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
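	/*
	 * Illustrative check of the arithmetic above: with tx_fifo_kb = 6,
	 * tx_fifo_size = (6 << 10) - 512 = 5632 and
	 * rx_fifo_size = ((0x4000 - 512 - 5632) / 16) * 15 = 9600, which
	 * matches the "9600 Rx Data Fifo Size" entry in the switch below.
	 */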
	/* Set the automatic flow control values */
	switch (lp->tx_fifo_kb) {
	/*
	 * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
	 * AFC_LO is AFC_HI/2
	 * BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
	case 2:		/* 13440 Rx Data Fifo Size */
		lp->afc_cfg = 0x008C46AF; break;
	case 3:		/* 12480 Rx Data Fifo Size */
		lp->afc_cfg = 0x0082419F; break;
	case 4:		/* 11520 Rx Data Fifo Size */
		lp->afc_cfg = 0x00783C9F; break;
	case 5:		/* 10560 Rx Data Fifo Size */
		lp->afc_cfg = 0x006E374F; break;
	case 6:		/* 9600 Rx Data Fifo Size */
		lp->afc_cfg = 0x0064328F; break;
	case 7:		/* 8640 Rx Data Fifo Size */
		lp->afc_cfg = 0x005A2D7F; break;
	case 8:		/* 7680 Rx Data Fifo Size */
		lp->afc_cfg = 0x0050287F; break;
	case 9:		/* 6720 Rx Data Fifo Size */
		lp->afc_cfg = 0x0046236F; break;
	case 10:	/* 5760 Rx Data Fifo Size */
		lp->afc_cfg = 0x003C1E6F; break;
	case 11:	/* 4800 Rx Data Fifo Size */
		lp->afc_cfg = 0x0032195F; break;
	/*
	 * AFC_HI is ~1520 bytes less than RX Data Fifo Size
	 * AFC_LO is AFC_HI/2
	 * BACK_DUR is about 5uS*(AFC_LO) rounded down
	 */
	case 12:	/* 3840 Rx Data Fifo Size */
		lp->afc_cfg = 0x0024124F; break;
	case 13:	/* 2880 Rx Data Fifo Size */
		lp->afc_cfg = 0x0015073F; break;
	case 14:	/* 1920 Rx Data Fifo Size */
		lp->afc_cfg = 0x0006032F; break;
	default:
		PRINTK(dev, "ERROR -- no AFC_CFG setting found");
		break;
	}
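	/*
	 * Reading the constants against the formulas above, case 6 as an
	 * example: 0x0064328F appears to carry AFC_HI = 0x64 = 100
	 * (100 * 64 = 6400, roughly 2/3 of the 9600-byte RX data FIFO) and
	 * AFC_LO = 0x32 = 50 = AFC_HI / 2, with the low byte holding the
	 * backpressure duration and flow-control option bits.
	 */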
	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
	    "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
	    lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);

	spin_lock_init(&lp->lock);

	/* Get the MAC address */
	SMC_GET_MAC_ADDR(lp, dev->dev_addr);

	/* now, reset the chip, and put it into a known state */
	smc911x_reset(dev);

	/*
	 * If dev->irq is 0, then the device has to be banged on to see
	 * what the IRQ is.
	 *
	 * Specifying an IRQ is done with the assumption that the user knows
	 * what (s)he is doing. No checking is done!!!!
	 */
	if (dev->irq < 1) {
		int trials;

		trials = 3;
		while (trials--) {
			dev->irq = smc911x_findirq(dev);
			if (dev->irq)
				break;
			/* kick the card and try again */
			smc911x_reset(dev);
		}
	}
	if (dev->irq == 0) {
		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
		retval = -ENODEV;
		goto err_out;
	}
	dev->irq = irq_canonicalize(dev->irq);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	dev->netdev_ops = &smc911x_netdev_ops;
	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
	dev->ethtool_ops = &smc911x_ethtool_ops;

	INIT_WORK(&lp->phy_configure, smc911x_phy_configure);

	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;
	lp->mii.force_media = 0;
	lp->mii.full_duplex = 0;
	lp->mii.dev = dev;
	lp->mii.mdio_read = smc911x_phy_read;
	lp->mii.mdio_write = smc911x_phy_write;

	/*
	 * Locate the phy, if any.
	 */
	smc911x_phy_detect(dev);

	/* Set default parameters */
	lp->msg_enable = NETIF_MSG_LINK;
	lp->ctl_rfduplx = 1;
	lp->ctl_rspeed = 100;

#ifdef SMC_DYNAMIC_BUS_CONFIG
	irq_flags = lp->cfg.irq_flags;
#else
	irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
#endif

	/* Grab the IRQ */
	retval = request_irq(dev->irq, smc911x_interrupt,
			     irq_flags, dev->name, dev);
	if (retval)
		goto err_out;
#ifdef SMC_USE_DMA
	lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
	lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
	lp->rxdma_active = 0;
	lp->txdma_active = 0;
	dev->dma = lp->rxdma;
#endif

	retval = register_netdev(dev);
	if (retval == 0) {
		/* now, print out the card info, in a short format.. */
		netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
			    version_string, lp->revision,
			    dev->base_addr, dev->irq);

#ifdef SMC_USE_DMA
		if (lp->rxdma != -1)
			pr_cont(" RXDMA %d", lp->rxdma);

		if (lp->txdma != -1)
			pr_cont(" TXDMA %d", lp->txdma);
#endif
		pr_cont("\n");
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
		} else {
			/* Print the Ethernet address */
			netdev_info(dev, "Ethernet addr: %pM\n",
				    dev->dev_addr);
		}

		if (lp->phy_type == 0) {
			PRINTK(dev, "No PHY found\n");
		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
			PRINTK(dev, "LAN911x Internal PHY\n");
		} else {
			PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
		}
	}

err_out:
#ifdef SMC_USE_DMA
	if (retval) {
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	return retval;
}
/*
 * smc911x_drv_probe(void)
 *
 * Output:
 *	0 --> there is a device
 *	anything else, error
 */
static int smc911x_drv_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct resource *res;
	struct smc911x_local *lp;
	void __iomem *addr;
	int ret;

	/* ndev is not valid yet, so avoid passing it in. */
	DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
		ret = -EBUSY;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct smc911x_local));
	if (!ndev) {
		ret = -ENOMEM;
		goto release_1;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->dma = (unsigned char)-1;
	ndev->irq = platform_get_irq(pdev, 0);
	lp = netdev_priv(ndev);
	lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	{
		struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);

		if (!pd) {
			ret = -EINVAL;
			goto release_both;
		}
		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
	}
#endif

	addr = ioremap(res->start, SMC911X_IO_EXTENT);
	if (!addr) {
		ret = -ENOMEM;
		goto release_both;
	}

	platform_set_drvdata(pdev, ndev);
	lp->base = addr;
	ndev->base_addr = res->start;
	ret = smc911x_probe(ndev);
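	/*
	 * Error unwinding note: the labels below sit inside the failure
	 * branch so that a single fall-through chain undoes the setup in
	 * reverse order -- unmap the registers, free the net_device, then
	 * release the memory region claimed above.
	 */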
	if (ret != 0) {
		iounmap(addr);
release_both:
		free_netdev(ndev);
release_1:
		release_mem_region(res->start, SMC911X_IO_EXTENT);
out:
		pr_info("%s: not found (%d).\n", CARDNAME, ret);
	}
#ifdef SMC_USE_DMA
	else {
		lp->physaddr = res->start;
		lp->dev = &pdev->dev;
	}
#endif

	return ret;
}
static int smc911x_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct smc911x_local *lp = netdev_priv(ndev);
	struct resource *res;

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);

	unregister_netdev(ndev);

	free_irq(ndev->irq, ndev);

#ifdef SMC_USE_DMA
	{
		if (lp->rxdma != -1) {
			SMC_DMA_FREE(dev, lp->rxdma);
		}
		if (lp->txdma != -1) {
			SMC_DMA_FREE(dev, lp->txdma);
		}
	}
#endif
	iounmap(lp->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, SMC911X_IO_EXTENT);

	free_netdev(ndev);
	return 0;
}
static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct smc911x_local *lp = netdev_priv(ndev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			smc911x_shutdown(ndev);
#if POWER_DOWN
			/* Set D2 - Energy detect only setting */
			SMC_SET_PMT_CTRL(lp, 2<<12);
#endif
		}
	}
	return 0;
}
static int smc911x_drv_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
	if (ndev) {
		struct smc911x_local *lp = netdev_priv(ndev);

		if (netif_running(ndev)) {
			smc911x_reset(ndev);
			if (lp->phy_type != 0)
				smc911x_phy_configure(&lp->phy_configure);
			smc911x_enable(ndev);
			netif_device_attach(ndev);
		}
	}
	return 0;
}
static struct platform_driver smc911x_driver = {
	.probe		= smc911x_drv_probe,
	.remove		= smc911x_drv_remove,
	.suspend	= smc911x_drv_suspend,
	.resume		= smc911x_drv_resume,
	.driver		= {
		.name	= CARDNAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(smc911x_driver);