/* [extraction artifact removed: page header and collapsed line-number gutter] */
  1. /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
  2. /*
  3. Written/copyright 1999-2001 by Donald Becker.
  4. Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
  5. Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
  6. Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
  7. This software may be used and distributed according to the terms of
  8. the GNU General Public License (GPL), incorporated herein by reference.
  9. Drivers based on or derived from this code fall under the GPL and must
  10. retain the authorship, copyright and license notice. This file is not
  11. a complete program and may only be used when the entire operating
  12. system is licensed under the GPL. License under other terms may be
  13. available. Contact the original author for details.
  14. The original author may be reached as becker@scyld.com, or at
  15. Scyld Computing Corporation
  16. 410 Severn Ave., Suite 210
  17. Annapolis MD 21403
  18. Support information and updates available at
  19. http://www.scyld.com/network/natsemi.html
  20. Linux kernel modifications:
  21. Version 1.0.1:
  22. - Spinlock fixes
  23. - Bug fixes and better intr performance (Tjeerd)
  24. Version 1.0.2:
  25. - Now reads correct MAC address from eeprom
  26. Version 1.0.3:
  27. - Eliminate redundant priv->tx_full flag
  28. - Call netif_start_queue from dev->tx_timeout
  29. - wmb() in start_tx() to flush data
  30. - Update Tx locking
  31. - Clean up PCI enable (davej)
  32. Version 1.0.4:
  33. - Merge Donald Becker's natsemi.c version 1.07
  34. Version 1.0.5:
  35. - { fill me in }
  36. Version 1.0.6:
  37. * ethtool support (jgarzik)
  38. * Proper initialization of the card (which sometimes
  39. fails to occur and leaves the card in a non-functional
  40. state). (uzi)
  41. * Some documented register settings to optimize some
  42. of the 100Mbit autodetection circuitry in rev C cards. (uzi)
  43. * Polling of the PHY intr for stuff like link state
  44. change and auto-negotiation to finally work properly. (uzi)
  45. * One-liner removal of a duplicate declaration of
  46. netdev_error(). (uzi)
  47. Version 1.0.7: (Manfred Spraul)
  48. * pci dma
  49. * SMP locking update
  50. * full reset added into tx_timeout
  51. * correct multicast hash generation (both big and little endian)
  52. [copied from a natsemi driver version
  53. from Myrio Corporation, Greg Smith]
  54. * suspend/resume
  55. version 1.0.8 (Tim Hockin <thockin@sun.com>)
  56. * ETHTOOL_* support
  57. * Wake on lan support (Erik Gilling)
  58. * MXDMA fixes for serverworks
  59. * EEPROM reload
  60. version 1.0.9 (Manfred Spraul)
  61. * Main change: fix lack of synchronize
  62. netif_close/netif_suspend against a last interrupt
  63. or packet.
  64. * do not enable superfluous interrupts (e.g. the
  65. driver relies on TxDone - TxIntr not needed)
  66. * wait that the hardware has really stopped in close
  67. and suspend.
  68. * workaround for the (at least) gcc-2.95.1 compiler
  69. problem. Also simplifies the code a bit.
  70. * disable_irq() in tx_timeout - needed to protect
  71. against rx interrupts.
  72. * stop the nic before switching into silent rx mode
  73. for wol (required according to docu).
  74. version 1.0.10:
  75. * use long for ee_addr (various)
  76. * print pointers properly (DaveM)
  77. * include asm/irq.h (?)
  78. version 1.0.11:
  79. * check and reset if PHY errors appear (Adrian Sun)
  80. * WoL cleanup (Tim Hockin)
  81. * Magic number cleanup (Tim Hockin)
  82. * Don't reload EEPROM on every reset (Tim Hockin)
  83. * Save and restore EEPROM state across reset (Tim Hockin)
  84. * MDIO Cleanup (Tim Hockin)
  85. * Reformat register offsets/bits (jgarzik)
  86. version 1.0.12:
  87. * ETHTOOL_* further support (Tim Hockin)
  88. version 1.0.13:
  89. * ETHTOOL_[G]EEPROM support (Tim Hockin)
  90. version 1.0.13:
  91. * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
  92. version 1.0.14:
  93. * Cleanup some messages and autoneg in ethtool (Tim Hockin)
  94. version 1.0.15:
  95. * Get rid of cable_magic flag
  96. * use new (National provided) solution for cable magic issue
  97. version 1.0.16:
  98. * call netdev_rx() for RxErrors (Manfred Spraul)
  99. * formatting and cleanups
  100. * change options and full_duplex arrays to be zero
  101. initialized
  102. * enable only the WoL and PHY interrupts in wol mode
  103. version 1.0.17:
  104. * only do cable_magic on 83815 and early 83816 (Tim Hockin)
  105. * create a function for rx refill (Manfred Spraul)
  106. * combine drain_ring and init_ring (Manfred Spraul)
  107. * oom handling (Manfred Spraul)
  108. * hands_off instead of playing with netif_device_{de,a}ttach
  109. (Manfred Spraul)
  110. * be sure to write the MAC back to the chip (Manfred Spraul)
  111. * lengthen EEPROM timeout, and always warn about timeouts
  112. (Manfred Spraul)
  113. * comments update (Manfred)
  114. * do the right thing on a phy-reset (Manfred and Tim)
  115. TODO:
  116. * big endian support with CFG:BEM instead of cpu_to_le32
  117. */
  118. #include <linux/config.h>
  119. #include <linux/module.h>
  120. #include <linux/kernel.h>
  121. #include <linux/string.h>
  122. #include <linux/timer.h>
  123. #include <linux/errno.h>
  124. #include <linux/ioport.h>
  125. #include <linux/slab.h>
  126. #include <linux/interrupt.h>
  127. #include <linux/pci.h>
  128. #include <linux/netdevice.h>
  129. #include <linux/etherdevice.h>
  130. #include <linux/skbuff.h>
  131. #include <linux/init.h>
  132. #include <linux/spinlock.h>
  133. #include <linux/ethtool.h>
  134. #include <linux/delay.h>
  135. #include <linux/rtnetlink.h>
  136. #include <linux/mii.h>
  137. #include <linux/crc32.h>
  138. #include <linux/bitops.h>
  139. #include <linux/prefetch.h>
  140. #include <asm/processor.h> /* Processor type for cache alignment. */
  141. #include <asm/io.h>
  142. #include <asm/irq.h>
  143. #include <asm/uaccess.h>
  144. #define DRV_NAME "natsemi"
  145. #define DRV_VERSION "1.07+LK1.0.17"
  146. #define DRV_RELDATE "Sep 27, 2002"
  147. #define RX_OFFSET 2
  148. /* Updated to recommendations in pci-skeleton v2.03. */
  149. /* The user-configurable values.
  150. These may be modified when a driver module is loaded.*/
  151. #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
  152. NETIF_MSG_LINK | \
  153. NETIF_MSG_WOL | \
  154. NETIF_MSG_RX_ERR | \
  155. NETIF_MSG_TX_ERR)
  156. static int debug = -1;
  157. static int mtu;
  158. /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  159. This chip uses a 512 element hash table based on the Ethernet CRC. */
  160. static const int multicast_filter_limit = 100;
  161. /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  162. Setting to > 1518 effectively disables this feature. */
  163. static int rx_copybreak;
  164. /* Used to pass the media type, etc.
  165. Both 'options[]' and 'full_duplex[]' should exist for driver
  166. interoperability.
  167. The media type is usually passed in 'options[]'.
  168. */
  169. #define MAX_UNITS 8 /* More are supported, limit only on options */
  170. static int options[MAX_UNITS];
  171. static int full_duplex[MAX_UNITS];
  172. /* Operational parameters that are set at compile time. */
  173. /* Keep the ring sizes a power of two for compile efficiency.
  174. The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  175. Making the Tx ring too large decreases the effectiveness of channel
  176. bonding and packet priority.
  177. There are no ill effects from too-large receive rings. */
  178. #define TX_RING_SIZE 16
  179. #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
  180. #define RX_RING_SIZE 32
  181. /* Operational parameters that usually are not changed. */
  182. /* Time in jiffies before concluding the transmitter is hung. */
  183. #define TX_TIMEOUT (2*HZ)
  184. #define NATSEMI_HW_TIMEOUT 400
  185. #define NATSEMI_TIMER_FREQ 3*HZ
  186. #define NATSEMI_PG0_NREGS 64
  187. #define NATSEMI_RFDR_NREGS 8
  188. #define NATSEMI_PG1_NREGS 4
  189. #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
  190. NATSEMI_PG1_NREGS)
  191. #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
  192. #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
  193. #define NATSEMI_DEF_EEPROM_SIZE 24 /* 12 16-bit values */
  194. /* Buffer sizes:
  195. * The nic writes 32-bit values, even if the upper bytes of
  196. * a 32-bit value are beyond the end of the buffer.
  197. */
  198. #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
  199. #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
  200. #define NATSEMI_LONGPKT 1518 /* limit for normal packets */
  201. #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
  202. /* These identify the driver base version and may not be removed. */
/* Driver banner printed at probe time. These lines identify the driver
 * base version and may not be removed (see comment above); __devinitdata
 * allows the kernel to discard the string after device initialization. */
  203. static const char version[] __devinitdata =
  204. KERN_INFO DRV_NAME " dp8381x driver, version "
  205. DRV_VERSION ", " DRV_RELDATE "\n"
  206. KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
  207. KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
  208. KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
/* Module metadata and load-time parameters. Permission argument 0 means
 * the parameters are settable at insmod time but not exposed in sysfs.
 * The parameter variables themselves (mtu, debug, rx_copybreak, options,
 * full_duplex) are defined above. */
  209. MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
  210. MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
  211. MODULE_LICENSE("GPL");
  212. module_param(mtu, int, 0);
  213. module_param(debug, int, 0);
  214. module_param(rx_copybreak, int, 0);
  215. module_param_array(options, int, NULL, 0);
  216. module_param_array(full_duplex, int, NULL, 0);
  217. MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
  218. MODULE_PARM_DESC(debug, "DP8381x default debug level");
  219. MODULE_PARM_DESC(rx_copybreak,
  220. "DP8381x copy breakpoint for copy-only-tiny-frames");
  221. MODULE_PARM_DESC(options,
  222. "DP8381x: Bits 0-3: media type, bit 17: full duplex");
  223. MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
  224. /*
  225. Theory of Operation
  226. I. Board Compatibility
  227. This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
  228. It also works with other chips in the DP83810 series.
  229. II. Board-specific settings
  230. This driver requires the PCI interrupt line to be valid.
  231. It honors the EEPROM-set values.
  232. III. Driver operation
  233. IIIa. Ring buffers
  234. This driver uses two statically allocated fixed-size descriptor lists
  235. formed into rings by a branch from the final descriptor to the beginning of
  236. the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
  237. The NatSemi design uses a 'next descriptor' pointer that the driver forms
  238. into a list.
  239. IIIb/c. Transmit/Receive Structure
  240. This driver uses a zero-copy receive and transmit scheme.
  241. The driver allocates full frame size skbuffs for the Rx ring buffers at
  242. open() time and passes the skb->data field to the chip as receive data
  243. buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
  244. a fresh skbuff is allocated and the frame is copied to the new skbuff.
  245. When the incoming frame is larger, the skbuff is passed directly up the
  246. protocol stack. Buffers consumed this way are replaced by newly allocated
  247. skbuffs in a later phase of receives.
  248. The RX_COPYBREAK value is chosen to trade-off the memory wasted by
  249. using a full-sized skbuff for small frames vs. the copying costs of larger
  250. frames. New boards are typically used in generously configured machines
  251. and the underfilled buffers have negligible impact compared to the benefit of
  252. a single allocation size, so the default value of zero results in never
  253. copying packets. When copying is done, the cost is usually mitigated by using
  254. a combined copy/checksum routine. Copying also preloads the cache, which is
  255. most useful with small frames.
  256. A subtle aspect of the operation is that unaligned buffers are not permitted
  257. by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
  258. longword aligned for further processing. On copies frames are put into the
  259. skbuff at an offset of "+2", 16-byte aligning the IP header.
  260. IIId. Synchronization
  261. Most operations are synchronized on the np->lock irq spinlock, except the
  262. performance critical codepaths:
  263. The rx process only runs in the interrupt handler. Access from outside
  264. the interrupt handler is only permitted after disable_irq().
  265. The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
  266. is set, then access is permitted under spin_lock_irq(&np->lock).
  267. Thus configuration functions that want to access everything must call
  268. disable_irq(dev->irq);
  269. spin_lock_bh(dev->xmit_lock);
  270. spin_lock_irq(&np->lock);
  271. IV. Notes
  272. NatSemi PCI network controllers are very uncommon.
  273. IVb. References
  274. http://www.scyld.com/expert/100mbps.html
  275. http://www.scyld.com/expert/NWay.html
  276. Datasheet is available from:
  277. http://www.national.com/pf/DP/DP83815.html
  278. IVc. Errata
  279. None characterised.
  280. */
/* Flags describing how a board's PCI resources are used; OR'ed together
 * into natsemi_pci_info[].flags (see PCI_IOTYPE below). */
  281. enum pcistuff {
  282. PCI_USES_IO = 0x01,
  283. PCI_USES_MEM = 0x02,
  284. PCI_USES_MASTER = 0x04,
  285. PCI_ADDR0 = 0x08,
  286. PCI_ADDR1 = 0x10,
  287. };
  288. /* MMIO operations required */
  289. #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
  290. /*
  291. * Support for fibre connections on Am79C874:
  292. * This phy needs a special setup when connected to a fibre cable.
  293. * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
  294. */
/* NOTE(review): PHYID_AM79C874 is presumably compared against the PHY's
 * MII ID registers; MII_MCTRL (0x15) is a vendor-specific register of
 * that PHY, with the FX_SEL/EN_SCRM bits below — confirm against the
 * AMD datasheet linked above. */
  295. #define PHYID_AM79C874 0x0022561b
  296. #define MII_MCTRL 0x15 /* mode control register */
  297. #define MII_FX_SEL 0x0001 /* 100BASE-FX (fiber) */
  298. #define MII_EN_SCRM 0x0004 /* enable scrambler (tp) */
  299. /* array of board data directly indexed by pci_tbl[x].driver_data */
  300. static const struct {
  301. const char *name;
  302. unsigned long flags;
  303. } natsemi_pci_info[] __devinitdata = {
  304. { "NatSemi DP8381[56]", PCI_IOTYPE },
  305. };
/* PCI IDs this driver claims; the all-zero entry terminates the table. */
  306. static struct pci_device_id natsemi_pci_tbl[] = {
  307. { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
  308. { 0, },
  309. };
/* Export the ID table so module loaders can match devices to this driver. */
  310. MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
  311. /* Offsets to the device registers.
  312. Unlike software-only systems, device drivers interact with complex hardware.
  313. It's not useful to define symbolic names for every register bit in the
  314. device.
  315. */
  316. enum register_offsets {
  317. ChipCmd = 0x00,
  318. ChipConfig = 0x04,
  319. EECtrl = 0x08,
  320. PCIBusCfg = 0x0C,
  321. IntrStatus = 0x10,
  322. IntrMask = 0x14,
  323. IntrEnable = 0x18,
  324. IntrHoldoff = 0x1C, /* DP83816 only */
  325. TxRingPtr = 0x20,
  326. TxConfig = 0x24,
  327. RxRingPtr = 0x30,
  328. RxConfig = 0x34,
  329. ClkRun = 0x3C,
  330. WOLCmd = 0x40,
  331. PauseCmd = 0x44,
  332. RxFilterAddr = 0x48,
  333. RxFilterData = 0x4C,
  334. BootRomAddr = 0x50,
  335. BootRomData = 0x54,
  336. SiliconRev = 0x58,
  337. StatsCtrl = 0x5C,
  338. StatsData = 0x60,
  339. RxPktErrs = 0x60,
  340. RxMissed = 0x68,
  341. RxCRCErrs = 0x64,
  342. BasicControl = 0x80,
  343. BasicStatus = 0x84,
  344. AnegAdv = 0x90,
  345. AnegPeer = 0x94,
  346. PhyStatus = 0xC0,
  347. MIntrCtrl = 0xC4,
  348. MIntrStatus = 0xC8,
  349. PhyCtrl = 0xE4,
  350. /* These are from the spec, around page 78... on a separate table.
  351. * The meaning of these registers depend on the value of PGSEL. */
/* Note: some of these offsets alias registers above (e.g. PMDCSR and
 * PhyCtrl are both 0xE4); which register is actually accessed depends on
 * the PGSEL page-select setting, per the comment above. */
  352. PGSEL = 0xCC,
  353. PMDCSR = 0xE4,
  354. TSTDAT = 0xFC,
  355. DSPCFG = 0xF4,
  356. SDCFG = 0xF8
  357. };
  358. /* the values for the 'magic' registers above (PGSEL=1) */
  359. #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
  360. #define TSTDAT_VAL 0x0
  361. #define DSPCFG_VAL 0x5040
  362. #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
  363. #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
  364. #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
  365. #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
  366. /* misc PCI space registers */
/* NOTE(review): PCIPM (0x44) lives in PCI configuration space, not in the
 * memory-mapped register window above — presumably the power-management
 * control/status register; confirm against the DP83815 datasheet. */
  367. enum pci_register_offsets {
  368. PCIPM = 0x44,
  369. };
/* Bits in the ChipCmd register (offset 0x00): reset and Rx/Tx on/off. */
  370. enum ChipCmd_bits {
  371. ChipReset = 0x100,
  372. RxReset = 0x20,
  373. TxReset = 0x10,
  374. RxOff = 0x08,
  375. RxOn = 0x04,
  376. TxOff = 0x02,
  377. TxOn = 0x01,
  378. };
/* Bits in the ChipConfig register (offset 0x04): PHY control,
 * autonegotiation setup, and link status. */
  379. enum ChipConfig_bits {
  380. CfgPhyDis = 0x200,
  381. CfgPhyRst = 0x400,
  382. CfgExtPhy = 0x1000,
  383. CfgAnegEnable = 0x2000,
  384. CfgAneg100 = 0x4000,
  385. CfgAnegFull = 0x8000,
  386. CfgAnegDone = 0x8000000,
  387. CfgFullDuplex = 0x20000000,
  388. CfgSpeed100 = 0x40000000,
  389. CfgLink = 0x80000000,
  390. };
/* Bits in the EECtrl register (offset 0x08). The one register carries
 * both the bit-banged EEPROM interface (EE_*) and the bit-banged MII
 * management interface (MII_*). */
  391. enum EECtrl_bits {
  392. EE_ShiftClk = 0x04,
  393. EE_DataIn = 0x01,
  394. EE_ChipSelect = 0x08,
  395. EE_DataOut = 0x02,
  396. MII_Data = 0x10,
  397. MII_Write = 0x20,
  398. MII_ShiftClk = 0x40,
  399. };
/* Bits in the PCIBusCfg register (offset 0x0C). */
  400. enum PCIBusCfg_bits {
  401. EepromReload = 0x4,
  402. };
  403. /* Bits in the interrupt status/mask registers. */
  404. enum IntrStatus_bits {
  405. IntrRxDone = 0x0001,
  406. IntrRxIntr = 0x0002,
  407. IntrRxErr = 0x0004,
  408. IntrRxEarly = 0x0008,
  409. IntrRxIdle = 0x0010,
  410. IntrRxOverrun = 0x0020,
  411. IntrTxDone = 0x0040,
  412. IntrTxIntr = 0x0080,
  413. IntrTxErr = 0x0100,
  414. IntrTxIdle = 0x0200,
  415. IntrTxUnderrun = 0x0400,
  416. StatsMax = 0x0800,
  417. SWInt = 0x1000,
  418. WOLPkt = 0x2000,
  419. LinkChange = 0x4000,
  420. IntrHighBits = 0x8000,
  421. RxStatusFIFOOver = 0x10000,
  422. IntrPCIErr = 0xf00000,
  423. RxResetDone = 0x1000000,
  424. TxResetDone = 0x2000000,
/* NOTE(review): 0xCD20 is a composite mask (RxErr|RxOverrun|TxErr|
 * TxUnderrun|StatsMax|WOLPkt|LinkChange|IntrHighBits), not a single
 * hardware bit — presumably the "abnormal" events worth special
 * handling; verify against the interrupt handler. */
  425. IntrAbnormalSummary = 0xCD20,
  426. };
  427. /*
  428. * Default Interrupts:
  429. * Rx OK, Rx Packet Error, Rx Overrun,
  430. * Tx OK, Tx Packet Error, Tx Underrun,
  431. * MIB Service, Phy Interrupt, High Bits,
  432. * Rx Status FIFO overrun,
  433. * Received Target Abort, Received Master Abort,
  434. * Signalled System Error, Received Parity Error
  435. */
  436. #define DEFAULT_INTR 0x00f1cd65
/* Bits and fields in the TxConfig register (offset 0x24): drain/fill
 * thresholds, DMA burst size, and transmit mode flags. */
  437. enum TxConfig_bits {
  438. TxDrthMask = 0x3f,
  439. TxFlthMask = 0x3f00,
  440. TxMxdmaMask = 0x700000,
  441. TxMxdma_512 = 0x0,
  442. TxMxdma_4 = 0x100000,
  443. TxMxdma_8 = 0x200000,
  444. TxMxdma_16 = 0x300000,
  445. TxMxdma_32 = 0x400000,
  446. TxMxdma_64 = 0x500000,
  447. TxMxdma_128 = 0x600000,
  448. TxMxdma_256 = 0x700000,
  449. TxCollRetry = 0x800000,
  450. TxAutoPad = 0x10000000,
  451. TxMacLoop = 0x20000000,
  452. TxHeartIgn = 0x40000000,
  453. TxCarrierIgn = 0x80000000
  454. };
  455. /*
  456. * Tx Configuration:
  457. * - 256 byte DMA burst length
  458. * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
  459. * - 64 bytes initial drain threshold (i.e. begin actual transmission
  460. * when 64 byte are in the fifo)
  461. * - on tx underruns, increase drain threshold by 64.
  462. * - at most use a drain threshold of 1472 bytes: The sum of the fill
  463. * threshold and the drain threshold must be less than 2016 bytes.
  464. *
  465. */
/* Thresholds are programmed in 32-byte units, hence the /32 scaling;
 * the fill threshold field sits at bits 8-13 (<< 8). */
  466. #define TX_FLTH_VAL ((512/32) << 8)
  467. #define TX_DRTH_VAL_START (64/32)
  468. #define TX_DRTH_VAL_INC 2
  469. #define TX_DRTH_VAL_LIMIT (1472/32)
/* Bits in the RxConfig register: drain threshold, max DMA burst size,
 * and accept filters for oversized/looped-back/runt/errored frames. */
enum RxConfig_bits {
	RxDrthMask = 0x3e,		/* drain threshold, units of 8 bytes */
	RxMxdmaMask = 0x700000,		/* max DMA burst size field */
	RxMxdma_512 = 0x0,
	RxMxdma_4 = 0x100000,
	RxMxdma_8 = 0x200000,
	RxMxdma_16 = 0x300000,
	RxMxdma_32 = 0x400000,
	RxMxdma_64 = 0x500000,
	RxMxdma_128 = 0x600000,
	RxMxdma_256 = 0x700000,
	RxAcceptLong = 0x8000000,
	RxAcceptTx = 0x10000000,
	RxAcceptRunt = 0x40000000,
	RxAcceptErr = 0x80000000
};
  486. #define RX_DRTH_VAL (128/8)
/* Bits in the ClkRun register: power-management-event enable and status. */
enum ClkRun_bits {
	PMEEnable = 0x100,
	PMEStatus = 0x8000,
};
/* Bits in the WOLCmd register: Wake* bits arm a wake-on-LAN source,
 * the corresponding Woke* bits report which source actually fired. */
enum WolCmd_bits {
	WakePhy = 0x1,
	WakeUnicast = 0x2,
	WakeMulticast = 0x4,
	WakeBroadcast = 0x8,
	WakeArp = 0x10,
	WakePMatch0 = 0x20,
	WakePMatch1 = 0x40,
	WakePMatch2 = 0x80,
	WakePMatch3 = 0x100,
	WakeMagic = 0x200,
	WakeMagicSecure = 0x400,
	SecureHack = 0x100000,
	WokePhy = 0x400000,
	WokeUnicast = 0x800000,
	WokeMulticast = 0x1000000,
	WokeBroadcast = 0x2000000,
	WokeArp = 0x4000000,
	WokePMatch0 = 0x8000000,
	WokePMatch1 = 0x10000000,
	WokePMatch2 = 0x20000000,
	WokePMatch3 = 0x40000000,
	WokeMagic = 0x80000000,
	WakeOptsSummary = 0x7ff		/* mask covering all Wake* option bits */
};
/* Bits in the RxFilterAddr register: low bits index the filter RAM
 * (read/written through RxFilterData), high bits control accept modes. */
enum RxFilterAddr_bits {
	RFCRAddressMask = 0x3ff,
	AcceptMulticast = 0x00200000,
	AcceptMyPhys = 0x08000000,
	AcceptAllPhys = 0x10000000,
	AcceptAllMulticast = 0x20000000,
	AcceptBroadcast = 0x40000000,
	RxFilterEnable = 0x80000000
};
/* Bits in the StatsCtrl register for the on-chip MIB counters. */
enum StatsCtrl_bits {
	StatsWarn = 0x1,
	StatsFreeze = 0x2,
	StatsClear = 0x4,
	StatsStrobe = 0x8,
};
/* Bits in the MIntrCtrl (PHY interrupt control) register. */
enum MIntrCtrl_bits {
	MICRIntEn = 0x2,		/* enable PHY event interrupts */
};
/* Bits in the PhyCtrl register. */
enum PhyCtrl_bits {
	PhyAddrMask = 0x1f,		/* internal phy's address on the mii bus */
};
  537. #define PHY_ADDR_NONE 32
  538. #define PHY_ADDR_INTERNAL 1
  539. /* values we might find in the silicon revision register */
  540. #define SRR_DP83815_C 0x0302
  541. #define SRR_DP83815_D 0x0403
  542. #define SRR_DP83816_A4 0x0504
  543. #define SRR_DP83816_A5 0x0505
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;		/* bus address of the next descriptor */
	s32 cmd_status;		/* command/status word, see desc_status_bits */
	u32 addr;		/* bus address of the packet buffer */
	u32 software_use;	/* free for driver use; hardware ignores it */
};
/* Bits in network_desc.status */
/* The DescTx* and DescRx* error bits occupy the same positions; which
 * set applies depends on whether the descriptor is on the Tx or Rx ring. */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};
/* Per-device driver state, allocated together with the net_device
 * (alloc_etherdev) and reached through netdev_priv(). */
struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device_stats stats;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack. */
	unsigned int rx_buf_sz;
	int oom;		/* set when an rx buffer allocation failed */
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	/* parms saved in ethtool format */
	u16 speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
	u8 duplex;		/* Duplex, half or full */
	u8 autoneg;		/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;	/* cached MII_ADVERTISE contents */
	unsigned int iosize;	/* length of the mapped register window */
	spinlock_t lock;
	u32 msg_enable;		/* netif_msg_* bitmask */
	/* EEPROM data */
	int eeprom_size;
};
  622. static void move_int_phy(struct net_device *dev, int addr);
  623. static int eeprom_read(void __iomem *ioaddr, int location);
  624. static int mdio_read(struct net_device *dev, int reg);
  625. static void mdio_write(struct net_device *dev, int reg, u16 data);
  626. static void init_phy_fixup(struct net_device *dev);
  627. static int miiport_read(struct net_device *dev, int phy_id, int reg);
  628. static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
  629. static int find_mii(struct net_device *dev);
  630. static void natsemi_reset(struct net_device *dev);
  631. static void natsemi_reload_eeprom(struct net_device *dev);
  632. static void natsemi_stop_rxtx(struct net_device *dev);
  633. static int netdev_open(struct net_device *dev);
  634. static void do_cable_magic(struct net_device *dev);
  635. static void undo_cable_magic(struct net_device *dev);
  636. static void check_link(struct net_device *dev);
  637. static void netdev_timer(unsigned long data);
  638. static void dump_ring(struct net_device *dev);
  639. static void tx_timeout(struct net_device *dev);
  640. static int alloc_ring(struct net_device *dev);
  641. static void refill_rx(struct net_device *dev);
  642. static void init_ring(struct net_device *dev);
  643. static void drain_tx(struct net_device *dev);
  644. static void drain_ring(struct net_device *dev);
  645. static void free_ring(struct net_device *dev);
  646. static void reinit_ring(struct net_device *dev);
  647. static void init_registers(struct net_device *dev);
  648. static int start_tx(struct sk_buff *skb, struct net_device *dev);
  649. static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
  650. static void netdev_error(struct net_device *dev, int intr_status);
  651. static int natsemi_poll(struct net_device *dev, int *budget);
  652. static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
  653. static void netdev_tx_done(struct net_device *dev);
  654. static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
  655. #ifdef CONFIG_NET_POLL_CONTROLLER
  656. static void natsemi_poll_controller(struct net_device *dev);
  657. #endif
  658. static void __set_rx_mode(struct net_device *dev);
  659. static void set_rx_mode(struct net_device *dev);
  660. static void __get_stats(struct net_device *dev);
  661. static struct net_device_stats *get_stats(struct net_device *dev);
  662. static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
  663. static int netdev_set_wol(struct net_device *dev, u32 newval);
  664. static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
  665. static int netdev_set_sopass(struct net_device *dev, u8 *newval);
  666. static int netdev_get_sopass(struct net_device *dev, u8 *data);
  667. static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
  668. static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
  669. static void enable_wol_mode(struct net_device *dev, int enable_intr);
  670. static int netdev_close(struct net_device *dev);
  671. static int netdev_get_regs(struct net_device *dev, u8 *buf);
  672. static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
  673. static struct ethtool_ops ethtool_ops;
  674. static inline void __iomem *ns_ioaddr(struct net_device *dev)
  675. {
  676. return (void __iomem *) dev->base_addr;
  677. }
/* Unmask chip interrupts.  The trailing readl flushes the posted PCI
 * write so the enable takes effect before we return. */
static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}
/* Mask chip interrupts; readl flushes the posted write (see enable). */
static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}
/* Park the internal phy on an mii address that will not collide with
 * @addr (the address we are about to talk to) nor with the external
 * phy used for transmission. */
static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

	/*
	 * The internal phy is visible on the external mii bus. Therefore we must
	 * move it away before we can send commands to an external phy.
	 * There are two addresses we must avoid:
	 * - the address on the external phy that is used for transmission.
	 * - the address that we want to access. User space can access phys
	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
	 *   phy that is used for transmission.
	 */
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);	/* flush posted write */
	udelay(1);
}
  710. static int __devinit natsemi_probe1 (struct pci_dev *pdev,
  711. const struct pci_device_id *ent)
  712. {
  713. struct net_device *dev;
  714. struct netdev_private *np;
  715. int i, option, irq, chip_idx = ent->driver_data;
  716. static int find_cnt = -1;
  717. unsigned long iostart, iosize;
  718. void __iomem *ioaddr;
  719. const int pcibar = 1; /* PCI base address register */
  720. int prev_eedata;
  721. u32 tmp;
  722. /* when built into the kernel, we only print version if device is found */
  723. #ifndef MODULE
  724. static int printed_version;
  725. if (!printed_version++)
  726. printk(version);
  727. #endif
  728. i = pci_enable_device(pdev);
  729. if (i) return i;
  730. /* natsemi has a non-standard PM control register
  731. * in PCI config space. Some boards apparently need
  732. * to be brought to D0 in this manner.
  733. */
  734. pci_read_config_dword(pdev, PCIPM, &tmp);
  735. if (tmp & PCI_PM_CTRL_STATE_MASK) {
  736. /* D0 state, disable PME assertion */
  737. u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
  738. pci_write_config_dword(pdev, PCIPM, newtmp);
  739. }
  740. find_cnt++;
  741. iostart = pci_resource_start(pdev, pcibar);
  742. iosize = pci_resource_len(pdev, pcibar);
  743. irq = pdev->irq;
  744. if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
  745. pci_set_master(pdev);
  746. dev = alloc_etherdev(sizeof (struct netdev_private));
  747. if (!dev)
  748. return -ENOMEM;
  749. SET_MODULE_OWNER(dev);
  750. SET_NETDEV_DEV(dev, &pdev->dev);
  751. i = pci_request_regions(pdev, DRV_NAME);
  752. if (i)
  753. goto err_pci_request_regions;
  754. ioaddr = ioremap(iostart, iosize);
  755. if (!ioaddr) {
  756. i = -ENOMEM;
  757. goto err_ioremap;
  758. }
  759. /* Work around the dropped serial bit. */
  760. prev_eedata = eeprom_read(ioaddr, 6);
  761. for (i = 0; i < 3; i++) {
  762. int eedata = eeprom_read(ioaddr, i + 7);
  763. dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
  764. dev->dev_addr[i*2+1] = eedata >> 7;
  765. prev_eedata = eedata;
  766. }
  767. dev->base_addr = (unsigned long __force) ioaddr;
  768. dev->irq = irq;
  769. np = netdev_priv(dev);
  770. np->pci_dev = pdev;
  771. pci_set_drvdata(pdev, dev);
  772. np->iosize = iosize;
  773. spin_lock_init(&np->lock);
  774. np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
  775. np->hands_off = 0;
  776. np->intr_status = 0;
  777. np->eeprom_size = NATSEMI_DEF_EEPROM_SIZE;
  778. /* Initial port:
  779. * - If the nic was configured to use an external phy and if find_mii
  780. * finds a phy: use external port, first phy that replies.
  781. * - Otherwise: internal port.
  782. * Note that the phy address for the internal phy doesn't matter:
  783. * The address would be used to access a phy over the mii bus, but
  784. * the internal phy is accessed through mapped registers.
  785. */
  786. if (readl(ioaddr + ChipConfig) & CfgExtPhy)
  787. dev->if_port = PORT_MII;
  788. else
  789. dev->if_port = PORT_TP;
  790. /* Reset the chip to erase previous misconfiguration. */
  791. natsemi_reload_eeprom(dev);
  792. natsemi_reset(dev);
  793. if (dev->if_port != PORT_TP) {
  794. np->phy_addr_external = find_mii(dev);
  795. if (np->phy_addr_external == PHY_ADDR_NONE) {
  796. dev->if_port = PORT_TP;
  797. np->phy_addr_external = PHY_ADDR_INTERNAL;
  798. }
  799. } else {
  800. np->phy_addr_external = PHY_ADDR_INTERNAL;
  801. }
  802. option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
  803. if (dev->mem_start)
  804. option = dev->mem_start;
  805. /* The lower four bits are the media type. */
  806. if (option) {
  807. if (option & 0x200)
  808. np->full_duplex = 1;
  809. if (option & 15)
  810. printk(KERN_INFO
  811. "natsemi %s: ignoring user supplied media type %d",
  812. pci_name(np->pci_dev), option & 15);
  813. }
  814. if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
  815. np->full_duplex = 1;
  816. /* The chip-specific entries in the device structure. */
  817. dev->open = &netdev_open;
  818. dev->hard_start_xmit = &start_tx;
  819. dev->stop = &netdev_close;
  820. dev->get_stats = &get_stats;
  821. dev->set_multicast_list = &set_rx_mode;
  822. dev->change_mtu = &natsemi_change_mtu;
  823. dev->do_ioctl = &netdev_ioctl;
  824. dev->tx_timeout = &tx_timeout;
  825. dev->watchdog_timeo = TX_TIMEOUT;
  826. dev->poll = natsemi_poll;
  827. dev->weight = 64;
  828. #ifdef CONFIG_NET_POLL_CONTROLLER
  829. dev->poll_controller = &natsemi_poll_controller;
  830. #endif
  831. SET_ETHTOOL_OPS(dev, &ethtool_ops);
  832. if (mtu)
  833. dev->mtu = mtu;
  834. netif_carrier_off(dev);
  835. /* get the initial settings from hardware */
  836. tmp = mdio_read(dev, MII_BMCR);
  837. np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
  838. np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
  839. np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
  840. np->advertising= mdio_read(dev, MII_ADVERTISE);
  841. if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
  842. && netif_msg_probe(np)) {
  843. printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
  844. "10%s %s duplex.\n",
  845. pci_name(np->pci_dev),
  846. (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
  847. "enabled, advertise" : "disabled, force",
  848. (np->advertising &
  849. (ADVERTISE_100FULL|ADVERTISE_100HALF))?
  850. "0" : "",
  851. (np->advertising &
  852. (ADVERTISE_100FULL|ADVERTISE_10FULL))?
  853. "full" : "half");
  854. }
  855. if (netif_msg_probe(np))
  856. printk(KERN_INFO
  857. "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
  858. pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
  859. np->advertising);
  860. /* save the silicon revision for later querying */
  861. np->srr = readl(ioaddr + SiliconRev);
  862. if (netif_msg_hw(np))
  863. printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
  864. pci_name(np->pci_dev), np->srr);
  865. i = register_netdev(dev);
  866. if (i)
  867. goto err_register_netdev;
  868. if (netif_msg_drv(np)) {
  869. printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ",
  870. dev->name, natsemi_pci_info[chip_idx].name, iostart,
  871. pci_name(np->pci_dev));
  872. for (i = 0; i < ETH_ALEN-1; i++)
  873. printk("%02x:", dev->dev_addr[i]);
  874. printk("%02x, IRQ %d", dev->dev_addr[i], irq);
  875. if (dev->if_port == PORT_TP)
  876. printk(", port TP.\n");
  877. else
  878. printk(", port MII, phy ad %d.\n", np->phy_addr_external);
  879. }
  880. return 0;
  881. err_register_netdev:
  882. iounmap(ioaddr);
  883. err_ioremap:
  884. pci_release_regions(pdev);
  885. pci_set_drvdata(pdev, NULL);
  886. err_pci_request_regions:
  887. free_netdev(dev);
  888. return i;
  889. }
  890. /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
  891. The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
  892. /* Delay between EEPROM clock transitions.
  893. No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
  894. a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
  895. made udelay() unreliable.
  896. The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
  897. depricated.
  898. */
  899. #define eeprom_delay(ee_addr) readl(ee_addr)
  900. #define EE_Write0 (EE_ChipSelect)
  901. #define EE_Write1 (EE_ChipSelect | EE_DataIn)
/* The EEPROM commands include the always-set leading bit. */
  903. enum EEPROM_Cmds {
  904. EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
  905. };
/* Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * Clocks the 11-bit read command out MSB first, then clocks 16 data
 * bits back in LSB first.  Returns the word read. */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);	/* latch on rising clock */
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in, least significant bit first. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
  935. /* MII transceiver control section.
  936. * The 83815 series has an internal transceiver, and we present the
  937. * internal management registers as if they were MII connected.
  938. * External Phy registers are referenced through the MII interface.
  939. */
  940. /* clock transitions >= 20ns (25MHz)
  941. * One readl should be good to PCI @ 100MHz
  942. */
  943. #define mii_delay(ioaddr) readl(ioaddr + EECtrl)
/* Clock one bit in from the bit-banged MII management interface:
 * raise the clock, sample MDIO, drop the clock. Returns 0 or 1. */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}
/* Clock the low @len bits of @data out on the MII management lines,
 * most significant bit first, driving MDIO while toggling the clock. */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);	/* release the bus */
	mii_delay(ioaddr);
}
/* Read register @reg of phy @phy_id over the bit-banged external MII
 * bus.  Returns the 16-bit register value, or 0 if the phy did not
 * drive the turnaround bit low (no phy answering). */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		return 0;	/* phy absent: bus stayed high */
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}
/* Write @data to register @reg of phy @phy_id over the bit-banged
 * external MII bus. */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}
  1004. static int mdio_read(struct net_device *dev, int reg)
  1005. {
  1006. struct netdev_private *np = netdev_priv(dev);
  1007. void __iomem *ioaddr = ns_ioaddr(dev);
  1008. /* The 83815 series has two ports:
  1009. * - an internal transceiver
  1010. * - an external mii bus
  1011. */
  1012. if (dev->if_port == PORT_TP)
  1013. return readw(ioaddr+BasicControl+(reg<<2));
  1014. else
  1015. return miiport_read(dev, np->phy_addr_external, reg);
  1016. }
  1017. static void mdio_write(struct net_device *dev, int reg, u16 data)
  1018. {
  1019. struct netdev_private *np = netdev_priv(dev);
  1020. void __iomem *ioaddr = ns_ioaddr(dev);
  1021. /* The 83815 series has an internal transceiver; handle separately */
  1022. if (dev->if_port == PORT_TP)
  1023. writew(data, ioaddr+BasicControl+(reg<<2));
  1024. else
  1025. miiport_write(dev, np->phy_addr_external, reg, data);
  1026. }
/* Reprogram the phy with the settings cached in netdev_private (speed,
 * duplex, autoneg, advertising), apply phy-specific quirks, and for the
 * internal phy re-apply the NSC-recommended DSP tuning values.  Called
 * after anything that may have reset the phy (power loss, port switch). */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0
		 || np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush posted writes */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;		/* the DSP tuning below is internal-phy only */

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		writew(1, ioaddr + PGSEL);	/* select the tuning page */
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read back to verify the value stuck */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
/* Switch the chip from the internal transceiver to the external phy.
 * Returns 0 if the external port was already selected, 1 if a switch
 * (plus phy fixup) was performed. */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
/* Switch the chip from the external phy back to the internal
 * transceiver, resetting the internal phy and re-applying the fixups.
 * Returns 0 if the internal port was already selected, 1 otherwise. */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}

	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	/* NOTE(review): 32-bit writel to a register slot that is read with
	 * readw above — presumably harmless on this chip; confirm against
	 * the DP83815 register map. */
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* wait for the self-clearing reset bit, up to NATSEMI_HW_TIMEOUT*10us */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}
  1196. /* Scan for a PHY on the external mii bus.
  1197. * There are two tricky points:
  1198. * - Do not scan while the internal phy is enabled. The internal phy will
  1199. * crash: e.g. reads from the DSPCFG register will return odd values and
  1200. * the nasty random phy reset code will reset the nic every few seconds.
  1201. * - The internal phy must be moved around, an external phy could
  1202. * have the same address as the internal phy.
  1203. */
/* Scan addresses 1..31 on the external mii bus for a responding phy.
 * Returns the address of the first phy that answers, or 32
 * (== PHY_ADDR_NONE) if the loop completes without a hit.  On success
 * np->mii is set to the phy's 32-bit PHYSID.  The port is temporarily
 * switched to external and restored afterwards. */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		/* keep the internal phy out of the way of address i */
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);

	return i;
}
  1237. /* CFG bits [13:16] [18:23] */
  1238. #define CFG_RESET_SAVE 0xfde000
  1239. /* WCSR bits [0:4] [9:10] */
  1240. #define WCSR_RESET_SAVE 0x61f
  1241. /* RFCR bits [20] [22] [27:31] */
  1242. #define RFCR_RESET_SAVE 0xf8500000;
/* Soft-reset the chip while preserving the registers that would have
 * been loaded from the EEPROM on a normal power-up (CFG, WCSR, RFCR,
 * the PMATCH mac address and the SOPAS secure-on password). */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH: read via the indirect RxFilterAddr/RxFilterData window */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* rebuild RFCR; written back after PMATCH/SOPAS below */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	writel(rfcr, ioaddr + RxFilterAddr);
}
  1315. static void reset_rx(struct net_device *dev)
  1316. {
  1317. int i;
  1318. struct netdev_private *np = netdev_priv(dev);
  1319. void __iomem *ioaddr = ns_ioaddr(dev);
  1320. np->intr_status &= ~RxResetDone;
  1321. writel(RxReset, ioaddr + ChipCmd);
  1322. for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
  1323. np->intr_status |= readl(ioaddr + IntrStatus);
  1324. if (np->intr_status & RxResetDone)
  1325. break;
  1326. udelay(15);
  1327. }
  1328. if (i==NATSEMI_HW_TIMEOUT) {
  1329. printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
  1330. dev->name, i*15);
  1331. } else if (netif_msg_hw(np)) {
  1332. printk(KERN_WARNING "%s: RX reset took %d usec.\n",
  1333. dev->name, i*15);
  1334. }
  1335. }
/*
 * natsemi_reload_eeprom - ask the chip to reload its configuration
 * registers from the EEPROM, and busy-wait until the EepromReload bit
 * self-clears (up to NATSEMI_HW_TIMEOUT polls, 50us apart).
 *
 * Note the udelay() comes *before* the first status read, giving the
 * chip time to actually start the reload - keep that ordering.
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = ns_ioaddr(dev);
        int i;

        writel(EepromReload, ioaddr + PCIBusCfg);
        for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
                udelay(50);
                if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
                        break;
        }
        if (i==NATSEMI_HW_TIMEOUT) {
                printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
                        pci_name(np->pci_dev), i*50);
        } else if (netif_msg_hw(np)) {
                printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
                        pci_name(np->pci_dev), i*50);
        }
}
/*
 * natsemi_stop_rxtx - command both DMA engines off and wait for the
 * chip to report that the TX and RX processes have actually stopped
 * (TxOn/RxOn status bits clear), up to NATSEMI_HW_TIMEOUT polls.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
        void __iomem * ioaddr = ns_ioaddr(dev);
        struct netdev_private *np = netdev_priv(dev);
        int i;

        writel(RxOff | TxOff, ioaddr + ChipCmd);
        for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
                if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
                        break;
                udelay(5);
        }
        if (i==NATSEMI_HW_TIMEOUT) {
                printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
                        dev->name, i*5);
        } else if (netif_msg_hw(np)) {
                printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
                        dev->name, i*5);
        }
}
/*
 * netdev_open - bring the interface up.
 *
 * Sequence: reset the chip, request the (shared) IRQ, allocate and
 * fill the descriptor rings, program the registers under np->lock,
 * load the station address into the perfect-match filter, start the
 * TX queue, and arm the housekeeping timer (netdev_timer).
 *
 * Returns 0 on success or a negative errno; on ring-allocation
 * failure the IRQ is released again before returning.
 */
static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        int i;

        /* Reset the chip, just in case. */
        natsemi_reset(dev);

        i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (i) return i;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                        dev->name, dev->irq);
        i = alloc_ring(dev);
        if (i < 0) {
                free_irq(dev->irq, dev);
                return i;
        }
        init_ring(dev);
        spin_lock_irq(&np->lock);
        init_registers(dev);
        /* now set the MAC address according to dev->dev_addr */
        for (i = 0; i < 3; i++) {
                /* filter RAM is written 16 bits at a time, little-endian */
                u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

                writel(i*2, ioaddr + RxFilterAddr);
                writew(mac, ioaddr + RxFilterData);
        }
        writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
        spin_unlock_irq(&np->lock);

        netif_start_queue(dev);

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
                        dev->name, (int)readl(ioaddr + ChipCmd));

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = &netdev_timer; /* timer handler */
        add_timer(&np->timer);
        return 0;
}
  1414. static void do_cable_magic(struct net_device *dev)
  1415. {
  1416. struct netdev_private *np = netdev_priv(dev);
  1417. void __iomem *ioaddr = ns_ioaddr(dev);
  1418. if (dev->if_port != PORT_TP)
  1419. return;
  1420. if (np->srr >= SRR_DP83816_A5)
  1421. return;
  1422. /*
  1423. * 100 MBit links with short cables can trip an issue with the chip.
  1424. * The problem manifests as lots of CRC errors and/or flickering
  1425. * activity LED while idle. This process is based on instructions
  1426. * from engineers at National.
  1427. */
  1428. if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
  1429. u16 data;
  1430. writew(1, ioaddr + PGSEL);
  1431. /*
  1432. * coefficient visibility should already be enabled via
  1433. * DSPCFG | 0x1000
  1434. */
  1435. data = readw(ioaddr + TSTDAT) & 0xff;
  1436. /*
  1437. * the value must be negative, and within certain values
  1438. * (these values all come from National)
  1439. */
  1440. if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
  1441. struct netdev_private *np = netdev_priv(dev);
  1442. /* the bug has been triggered - fix the coefficient */
  1443. writew(TSTDAT_FIXED, ioaddr + TSTDAT);
  1444. /* lock the value */
  1445. data = readw(ioaddr + DSPCFG);
  1446. np->dspcfg = data | DSPCFG_LOCK;
  1447. writew(np->dspcfg, ioaddr + DSPCFG);
  1448. }
  1449. writew(0, ioaddr + PGSEL);
  1450. }
  1451. }
/*
 * undo_cable_magic - revert do_cable_magic()'s coefficient lock.
 * Clears the DSPCFG lock bit through the vendor register page and
 * refreshes the np->dspcfg cache.  Same applicability gate as
 * do_cable_magic: TP port on chips older than DP83816 A5.
 */
static void undo_cable_magic(struct net_device *dev)
{
        u16 data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        if (dev->if_port != PORT_TP)
                return;

        if (np->srr >= SRR_DP83816_A5)
                return;

        writew(1, ioaddr + PGSEL);
        /* make sure the lock bit is clear */
        data = readw(ioaddr + DSPCFG);
        np->dspcfg = data & ~DSPCFG_LOCK;
        writew(np->dspcfg, ioaddr + DSPCFG);
        writew(0, ioaddr + PGSEL);
}
/*
 * check_link - re-read PHY link state and resync the MAC to it.
 *
 * Reports carrier up/down transitions, applies or undoes the
 * short-cable workaround on the transition, and reprograms the TX/RX
 * config registers when the effective duplex differs from what the
 * MAC is currently configured for.  All call sites visible in this
 * file hold np->lock when calling.
 */
static void check_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        int duplex;
        u16 bmsr;

        /* The link status field is latched: it remains low after a temporary
         * link failure until it's read. We need the current link status,
         * thus read twice.
         */
        mdio_read(dev, MII_BMSR);
        bmsr = mdio_read(dev, MII_BMSR);

        if (!(bmsr & BMSR_LSTATUS)) {
                /* link lost: report once, then undo the cable workaround */
                if (netif_carrier_ok(dev)) {
                        if (netif_msg_link(np))
                                printk(KERN_NOTICE "%s: link down.\n",
                                        dev->name);
                        netif_carrier_off(dev);
                        undo_cable_magic(dev);
                }
                return;
        }
        if (!netif_carrier_ok(dev)) {
                if (netif_msg_link(np))
                        printk(KERN_NOTICE "%s: link up.\n", dev->name);
                netif_carrier_on(dev);
                do_cable_magic(dev);
        }

        /* duplex: forced via np->full_duplex, else from autoneg result,
         * else from the BMCR full-duplex bit when autoneg is incomplete */
        duplex = np->full_duplex;
        if (!duplex) {
                if (bmsr & BMSR_ANEGCOMPLETE) {
                        int tmp = mii_nway_result(
                                np->advertising & mdio_read(dev, MII_LPA));
                        if (tmp == LPA_100FULL || tmp == LPA_10FULL)
                                duplex = 1;
                } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
                        duplex = 1;
        }

        /* if duplex is set then bit 28 must be set, too */
        if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
                if (netif_msg_link(np))
                        printk(KERN_INFO
                                "%s: Setting %s-duplex based on negotiated "
                                "link capability.\n", dev->name,
                                duplex ? "full" : "half");
                if (duplex) {
                        np->rx_config |= RxAcceptTx;
                        np->tx_config |= TxCarrierIgn | TxHeartIgn;
                } else {
                        np->rx_config &= ~RxAcceptTx;
                        np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
                }
                writel(np->tx_config, ioaddr + TxConfig);
                writel(np->rx_config, ioaddr + RxConfig);
        }
}
/*
 * init_registers - program the chip for operation after a reset.
 *
 * Fixes up the PHY, points the chip at the descriptor rings, sets the
 * TX/RX DMA configuration, disables PME, syncs duplex via check_link,
 * programs the RX filter mode, enables interrupts and finally starts
 * both DMA engines.  Callers hold np->lock (see netdev_open,
 * netdev_timer, tx_timeout).
 */
static void init_registers(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        init_phy_fixup(dev);

        /* clear any interrupts that are pending, such as wake events */
        readl(ioaddr + IntrStatus);

        writel(np->ring_dma, ioaddr + RxRingPtr);
        /* TX ring lives immediately after the RX ring in the same
         * coherent allocation (see alloc_ring) */
        writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
                ioaddr + TxRingPtr);

        /* Initialize other registers.
         * Configure the PCI bus bursts and FIFO thresholds.
         * Configure for standard, in-spec Ethernet.
         * Start with half-duplex. check_link will update
         * to the correct settings.
         */

        /* DRTH: 2: start tx if 64 bytes are in the fifo
         * FLTH: 0x10: refill with next packet if 512 bytes are free
         * MXDMA: 0: up to 256 byte bursts.
         * MXDMA must be <= FLTH
         * ECRETRY=1
         * ATP=1
         */
        np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
                TX_FLTH_VAL | TX_DRTH_VAL_START;
        writel(np->tx_config, ioaddr + TxConfig);

        /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
         * MXDMA 0: up to 256 byte bursts
         */
        np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
        /* if receive ring now has bigger buffers than normal, enable jumbo */
        if (np->rx_buf_sz > NATSEMI_LONGPKT)
                np->rx_config |= RxAcceptLong;
        writel(np->rx_config, ioaddr + RxConfig);

        /* Disable PME:
         * The PME bit is initialized from the EEPROM contents.
         * PCI cards probably have PME disabled, but motherboard
         * implementations may have PME set to enable WakeOnLan.
         * With PME set the chip will scan incoming packets but
         * nothing will be written to memory. */
        np->SavedClkRun = readl(ioaddr + ClkRun);
        writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
        if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
                printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
                        dev->name, readl(ioaddr + WOLCmd));
        }

        check_link(dev);
        __set_rx_mode(dev);

        /* Enable interrupts by setting the interrupt mask. */
        writel(DEFAULT_INTR, ioaddr + IntrMask);
        writel(1, ioaddr + IntrEnable);

        writel(RxOn | TxOn, ioaddr + ChipCmd);
        writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
  1578. /*
  1579. * netdev_timer:
  1580. * Purpose:
  1581. * 1) check for link changes. Usually they are handled by the MII interrupt
  1582. * but it doesn't hurt to check twice.
  1583. * 2) check for sudden death of the NIC:
  1584. * It seems that a reference set for this chip went out with incorrect info,
  1585. * and there exist boards that aren't quite right. An unexpected voltage
  1586. * drop can cause the PHY to get itself in a weird state (basically reset).
  1587. * NOTE: this only seems to affect revC chips.
 * 3) check for death of the RX path due to OOM
  1589. */
static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        int next_tick = 5*HZ;

        if (netif_msg_timer(np)) {
                /* DO NOT read the IntrStatus register,
                 * a read clears any pending interrupts.
                 */
                printk(KERN_DEBUG "%s: Media selection timer tick.\n",
                        dev->name);
        }

        if (dev->if_port == PORT_TP) {
                u16 dspcfg;

                spin_lock_irq(&np->lock);
                /* check for a nasty random phy-reset - use dspcfg as a flag */
                writew(1, ioaddr+PGSEL);
                dspcfg = readw(ioaddr+DSPCFG);
                writew(0, ioaddr+PGSEL);
                if (dspcfg != np->dspcfg) {
                        /* cached value lost: the PHY has spontaneously reset */
                        if (!netif_queue_stopped(dev)) {
                                /* drop the lock, mask the IRQ, then retake the
                                 * lock before re-initializing; the hardware is
                                 * only touched with both protections in place */
                                spin_unlock_irq(&np->lock);
                                if (netif_msg_hw(np))
                                        printk(KERN_NOTICE "%s: possible phy reset: "
                                                "re-initializing\n", dev->name);
                                disable_irq(dev->irq);
                                spin_lock_irq(&np->lock);
                                natsemi_stop_rxtx(dev);
                                dump_ring(dev);
                                reinit_ring(dev);
                                init_registers(dev);
                                spin_unlock_irq(&np->lock);
                                enable_irq(dev->irq);
                        } else {
                                /* hurry back */
                                next_tick = HZ;
                                spin_unlock_irq(&np->lock);
                        }
                } else {
                        /* init_registers() calls check_link() for the above case */
                        check_link(dev);
                        spin_unlock_irq(&np->lock);
                }
        } else {
                spin_lock_irq(&np->lock);
                check_link(dev);
                spin_unlock_irq(&np->lock);
        }

        /* RX path died of OOM earlier: try to refill, and restart the RX
         * engine if we got buffers back; otherwise retry on the very
         * next jiffy */
        if (np->oom) {
                disable_irq(dev->irq);
                np->oom = 0;
                refill_rx(dev);
                enable_irq(dev->irq);
                if (!np->oom) {
                        writel(RxOn, ioaddr + ChipCmd);
                } else {
                        next_tick = 1;
                }
        }
        mod_timer(&np->timer, jiffies + next_tick);
}
  1652. static void dump_ring(struct net_device *dev)
  1653. {
  1654. struct netdev_private *np = netdev_priv(dev);
  1655. if (netif_msg_pktdata(np)) {
  1656. int i;
  1657. printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
  1658. for (i = 0; i < TX_RING_SIZE; i++) {
  1659. printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
  1660. i, np->tx_ring[i].next_desc,
  1661. np->tx_ring[i].cmd_status,
  1662. np->tx_ring[i].addr);
  1663. }
  1664. printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
  1665. for (i = 0; i < RX_RING_SIZE; i++) {
  1666. printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
  1667. i, np->rx_ring[i].next_desc,
  1668. np->rx_ring[i].cmd_status,
  1669. np->rx_ring[i].addr);
  1670. }
  1671. }
  1672. }
/*
 * tx_timeout - network-stack callback when transmission stalls.
 * With the IRQ masked and np->lock held, dumps the rings, resets the
 * chip and reprograms it from scratch; then counts the error and
 * wakes the queue.  Skipped (with a warning) when np->hands_off is
 * set, i.e. some other path owns the hardware.
 */
static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        disable_irq(dev->irq);
        spin_lock_irq(&np->lock);
        if (!np->hands_off) {
                if (netif_msg_tx_err(np))
                        printk(KERN_WARNING
                                "%s: Transmit timed out, status %#08x,"
                                " resetting...\n",
                                dev->name, readl(ioaddr + IntrStatus));
                dump_ring(dev);

                natsemi_reset(dev);
                reinit_ring(dev);
                init_registers(dev);
        } else {
                printk(KERN_WARNING
                        "%s: tx_timeout while in hands_off state?\n",
                        dev->name);
        }
        spin_unlock_irq(&np->lock);
        enable_irq(dev->irq);

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}
  1700. static int alloc_ring(struct net_device *dev)
  1701. {
  1702. struct netdev_private *np = netdev_priv(dev);
  1703. np->rx_ring = pci_alloc_consistent(np->pci_dev,
  1704. sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
  1705. &np->ring_dma);
  1706. if (!np->rx_ring)
  1707. return -ENOMEM;
  1708. np->tx_ring = &np->rx_ring[RX_RING_SIZE];
  1709. return 0;
  1710. }
/*
 * refill_rx - replenish RX buffers for all outstanding ring slots.
 *
 * Walks dirty_rx up to cur_rx; for any slot without an skb, allocates
 * one (rx_buf_sz + NATSEMI_PADDING bytes), DMA-maps it and stores the
 * bus address in the descriptor before the descriptor is re-armed.
 * On allocation failure we bail out and, if the whole ring is empty,
 * set np->oom so that netdev_timer retries later.
 */
static void refill_rx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        /* Refill the Rx ring buffers. */
        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
                struct sk_buff *skb;
                int entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
                        unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
                        skb = dev_alloc_skb(buflen);
                        np->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break; /* Better luck next round. */
                        skb->dev = dev; /* Mark as being used by this device. */
                        np->rx_dma[entry] = pci_map_single(np->pci_dev,
                                skb->data, buflen, PCI_DMA_FROMDEVICE);
                        np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
                }
                /* re-arm the descriptor with the buffer size; written for
                 * every slot in range, even ones that kept their skb */
                np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
        }
        /* nothing could be refilled at all: declare OOM */
        if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
                if (netif_msg_rx_err(np))
                        printk(KERN_WARNING "%s: going OOM.\n", dev->name);
                np->oom = 1;
        }
}
  1737. static void set_bufsize(struct net_device *dev)
  1738. {
  1739. struct netdev_private *np = netdev_priv(dev);
  1740. if (dev->mtu <= ETH_DATA_LEN)
  1741. np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
  1742. else
  1743. np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
  1744. }
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        /* 1) TX ring: all slots empty, each descriptor chained to the
         * next (modulo TX_RING_SIZE) within the shared DMA region, in
         * which the TX ring sits after the RX_RING_SIZE RX descriptors */
        np->dirty_tx = np->cur_tx = 0;
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
                        +sizeof(struct netdev_desc)
                        *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
                np->tx_ring[i].cmd_status = 0;
        }

        /* 2) RX ring */
        np->dirty_rx = 0;
        np->cur_rx = RX_RING_SIZE;
        np->oom = 0;
        set_bufsize(dev);

        np->rx_head_desc = &np->rx_ring[0];

        /* Please be careful before changing this loop - at least gcc-2.95.1
         * miscompiles it otherwise.
         */
        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
                        +sizeof(struct netdev_desc)
                        *((i+1)%RX_RING_SIZE));
                np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
                np->rx_skbuff[i] = NULL;
        }
        /* attach buffers to the RX descriptors */
        refill_rx(dev);
        dump_ring(dev);
}
  1779. static void drain_tx(struct net_device *dev)
  1780. {
  1781. struct netdev_private *np = netdev_priv(dev);
  1782. int i;
  1783. for (i = 0; i < TX_RING_SIZE; i++) {
  1784. if (np->tx_skbuff[i]) {
  1785. pci_unmap_single(np->pci_dev,
  1786. np->tx_dma[i], np->tx_skbuff[i]->len,
  1787. PCI_DMA_TODEVICE);
  1788. dev_kfree_skb(np->tx_skbuff[i]);
  1789. np->stats.tx_dropped++;
  1790. }
  1791. np->tx_skbuff[i] = NULL;
  1792. }
  1793. }
  1794. static void drain_rx(struct net_device *dev)
  1795. {
  1796. struct netdev_private *np = netdev_priv(dev);
  1797. unsigned int buflen = np->rx_buf_sz;
  1798. int i;
  1799. /* Free all the skbuffs in the Rx queue. */
  1800. for (i = 0; i < RX_RING_SIZE; i++) {
  1801. np->rx_ring[i].cmd_status = 0;
  1802. np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
  1803. if (np->rx_skbuff[i]) {
  1804. pci_unmap_single(np->pci_dev,
  1805. np->rx_dma[i], buflen,
  1806. PCI_DMA_FROMDEVICE);
  1807. dev_kfree_skb(np->rx_skbuff[i]);
  1808. }
  1809. np->rx_skbuff[i] = NULL;
  1810. }
  1811. }
/* Release every buffer attached to either ring (RX first, then TX). */
static void drain_ring(struct net_device *dev)
{
        drain_rx(dev);
        drain_tx(dev);
}
  1817. static void free_ring(struct net_device *dev)
  1818. {
  1819. struct netdev_private *np = netdev_priv(dev);
  1820. pci_free_consistent(np->pci_dev,
  1821. sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
  1822. np->rx_ring, np->ring_dma);
  1823. }
  1824. static void reinit_rx(struct net_device *dev)
  1825. {
  1826. struct netdev_private *np = netdev_priv(dev);
  1827. int i;
  1828. /* RX Ring */
  1829. np->dirty_rx = 0;
  1830. np->cur_rx = RX_RING_SIZE;
  1831. np->rx_head_desc = &np->rx_ring[0];
  1832. /* Initialize all Rx descriptors. */
  1833. for (i = 0; i < RX_RING_SIZE; i++)
  1834. np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
  1835. refill_rx(dev);
  1836. }
  1837. static void reinit_ring(struct net_device *dev)
  1838. {
  1839. struct netdev_private *np = netdev_priv(dev);
  1840. int i;
  1841. /* drain TX ring */
  1842. drain_tx(dev);
  1843. np->dirty_tx = np->cur_tx = 0;
  1844. for (i=0;i<TX_RING_SIZE;i++)
  1845. np->tx_ring[i].cmd_status = 0;
  1846. reinit_rx(dev);
  1847. }
/*
 * start_tx - hard_start_xmit hook: queue one skb on the TX ring.
 *
 * The descriptor address is filled in before np->lock is taken; the
 * ownership handoff (cmd_status with DescOwn), the wmb() and the
 * cur_tx increment happen under the lock.  When np->hands_off is set
 * the packet is silently dropped and counted.  Always returns 0.
 */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        unsigned entry;

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;

        np->tx_skbuff[entry] = skb;
        np->tx_dma[entry] = pci_map_single(np->pci_dev,
                skb->data,skb->len, PCI_DMA_TODEVICE);

        np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

        spin_lock_irq(&np->lock);

        if (!np->hands_off) {
                np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
                /* StrongARM: Explicitly cache flush np->tx_ring and
                 * skb->data,skb->len. */
                wmb();
                np->cur_tx++;
                /* near-full ring: reap finished slots, and stop the queue
                 * if that did not make room */
                if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
                        netdev_tx_done(dev);
                        if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
                                netif_stop_queue(dev);
                }
                /* Wake the potentially-idle transmit channel. */
                writel(TxOn, ioaddr + ChipCmd);
        } else {
                dev_kfree_skb_irq(skb);
                np->stats.tx_dropped++;
        }
        spin_unlock_irq(&np->lock);

        dev->trans_start = jiffies;

        if (netif_msg_tx_queued(np)) {
                printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
                        dev->name, np->cur_tx, entry);
        }
        return 0;
}
/*
 * netdev_tx_done - reap completed TX descriptors.
 *
 * Walks dirty_tx toward cur_tx, stopping at the first descriptor the
 * hardware still owns (DescOwn set).  Updates packet/byte or error
 * counters from the descriptor status, unmaps and frees each skb, and
 * wakes the queue once enough room is available.  Runs under np->lock
 * (from natsemi_poll or start_tx).
 */
static void netdev_tx_done(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
                        break;
                if (netif_msg_tx_done(np))
                        printk(KERN_DEBUG
                                "%s: tx frame #%d finished, status %#08x.\n",
                                        dev->name, np->dirty_tx,
                                        le32_to_cpu(np->tx_ring[entry].cmd_status));
                if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
                        np->stats.tx_packets++;
                        np->stats.tx_bytes += np->tx_skbuff[entry]->len;
                } else { /* Various Tx errors */
                        int tx_status =
                                le32_to_cpu(np->tx_ring[entry].cmd_status);
                        if (tx_status & (DescTxAbort|DescTxExcColl))
                                np->stats.tx_aborted_errors++;
                        if (tx_status & DescTxFIFO)
                                np->stats.tx_fifo_errors++;
                        if (tx_status & DescTxCarrier)
                                np->stats.tx_carrier_errors++;
                        if (tx_status & DescTxOOWCol)
                                np->stats.tx_window_errors++;
                        np->stats.tx_errors++;
                }
                pci_unmap_single(np->pci_dev,np->tx_dma[entry],
                                        np->tx_skbuff[entry]->len,
                                        PCI_DMA_TODEVICE);
                /* Free the original skb. */
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
        }
        if (netif_queue_stopped(dev)
                && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                /* The ring is no longer full, wake queue. */
                netif_wake_queue(dev);
        }
}
/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        /* hands_off: another code path (e.g. an ethtool operation) owns
         * the chip; pretend the interrupt was not ours */
        if (np->hands_off)
                return IRQ_NONE;

        /* Reading automatically acknowledges. */
        np->intr_status = readl(ioaddr + IntrStatus);

        if (netif_msg_intr(np))
                printk(KERN_DEBUG
                       "%s: Interrupt, status %#08x, mask %#08x.\n",
                       dev->name, np->intr_status,
                       readl(ioaddr + IntrMask));

        /* shared IRQ and nothing pending for us */
        if (!np->intr_status)
                return IRQ_NONE;

        prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

        if (netif_rx_schedule_prep(dev)) {
                /* Disable interrupts and register for poll */
                natsemi_irq_disable(dev);
                __netif_rx_schedule(dev);
        }
        return IRQ_HANDLED;
}
/* This is the NAPI poll routine.  As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct net_device *dev, int *budget)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        int work_to_do = min(*budget, dev->quota);
        int work_done = 0;

        do {
                /* reap finished transmits under the lock */
                if (np->intr_status &
                    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
                        spin_lock(&np->lock);
                        netdev_tx_done(dev);
                        spin_unlock(&np->lock);
                }

                /* Abnormal error summary/uncommon events handlers. */
                if (np->intr_status & IntrAbnormalSummary)
                        netdev_error(dev, np->intr_status);

                if (np->intr_status &
                    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
                     IntrRxErr | IntrRxOverrun)) {
                        netdev_rx(dev, &work_done, work_to_do);
                }

                *budget -= work_done;
                dev->quota -= work_done;

                /* budget exhausted: stay scheduled, interrupts stay off */
                if (work_done >= work_to_do)
                        return 1;

                /* re-read (and thereby acknowledge) any status bits that
                 * arrived while we were working */
                np->intr_status = readl(ioaddr + IntrStatus);
        } while (np->intr_status);

        netif_rx_complete(dev);

        /* Reenable interrupts providing nothing is trying to shut
         * the chip down. */
        spin_lock(&np->lock);
        if (!np->hands_off && netif_running(dev))
                natsemi_irq_enable(dev);
        spin_unlock(&np->lock);

        return 0;
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
        s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
        unsigned int buflen = np->rx_buf_sz;
        void __iomem * ioaddr = ns_ioaddr(dev);

        /* If the driver owns the next entry it's a new packet. Send it up. */
        while (desc_status < 0) { /* e.g. & DescOwn */
                int pkt_len;
                if (netif_msg_rx_status(np))
                        printk(KERN_DEBUG
                                "  netdev_rx() entry %d status was %#08x.\n",
                                entry, desc_status);
                if (--boguscnt < 0)
                        break;
                /* honor the NAPI budget */
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
                /* frame length from the descriptor, minus the 4-byte CRC */
                pkt_len = (desc_status & DescSizeMask) - 4;
                if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
                        if (desc_status & DescMore) {
                                if (netif_msg_rx_err(np))
                                        printk(KERN_WARNING
                                                "%s: Oversized(?) Ethernet "
                                                "frame spanned multiple "
                                                "buffers, entry %#08x "
                                                "status %#08x.\n", dev->name,
                                                np->cur_rx, desc_status);
                                np->stats.rx_length_errors++;

                                /* The RX state machine has probably
                                 * locked up beneath us.  Follow the
                                 * reset procedure documented in
                                 * AN-1287. */

                                spin_lock_irq(&np->lock);
                                reset_rx(dev);
                                reinit_rx(dev);
                                writel(np->ring_dma, ioaddr + RxRingPtr);
                                check_link(dev);
                                spin_unlock_irq(&np->lock);

                                /* We'll enable RX on exit from this
                                 * function. */
                                break;

                        } else {
                                /* There was an error. */
                                np->stats.rx_errors++;
                                if (desc_status & (DescRxAbort|DescRxOver))
                                        np->stats.rx_over_errors++;
                                if (desc_status & (DescRxLong|DescRxRunt))
                                        np->stats.rx_length_errors++;
                                if (desc_status & (DescRxInvalid|DescRxAlign))
                                        np->stats.rx_frame_errors++;
                                if (desc_status & DescRxCRC)
                                        np->stats.rx_crc_errors++;
                        }
                } else if (pkt_len > np->rx_buf_sz) {
                        /* if this is the tail of a double buffer
                         * packet, we've already counted the error
                         * on the first part. Ignore the second half.
                         */
                } else {
                        struct sk_buff *skb;
                        /* Omit CRC size. */
                        /* Check if the packet is long enough to accept
                         * without copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
                                /* small frame: copy into a fresh skb and
                                 * hand the DMA buffer straight back */
                                skb->dev = dev;
                                /* 16 byte align the IP header */
                                skb_reserve(skb, RX_OFFSET);
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                        np->rx_dma[entry],
                                        buflen,
                                        PCI_DMA_FROMDEVICE);
                                eth_copy_and_sum(skb,
                                        np->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pci_dev,
                                        np->rx_dma[entry],
                                        buflen,
                                        PCI_DMA_FROMDEVICE);
                        } else {
                                /* large frame: pass the mapped skb up and
                                 * let refill_rx allocate a replacement */
                                pci_unmap_single(np->pci_dev, np->rx_dma[entry],
                                        buflen, PCI_DMA_FROMDEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_receive_skb(skb);
                        dev->last_rx = jiffies;
                        np->stats.rx_packets++;
                        np->stats.rx_bytes += pkt_len;
                }
                entry = (++np->cur_rx) % RX_RING_SIZE;
                np->rx_head_desc = &np->rx_ring[entry];
                desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
        }
        refill_rx(dev);

        /* Restart Rx engine if stopped. */
        if (np->oom)
                mod_timer(&np->timer, jiffies + 1);
        else
                writel(RxOn, ioaddr + ChipCmd);
}
/*
 * netdev_error - handle the "abnormal" interrupt sources: link change,
 * stats counter overflow, TX underrun (adaptively raising the TX drain
 * threshold), wake-on-LAN events, RX status FIFO overrun, and PCI bus
 * errors.  Called from natsemi_poll; takes np->lock itself.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        spin_lock(&np->lock);
        if (intr_status & LinkChange) {
                u16 lpa = mdio_read(dev, MII_LPA);
                if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
                 && netif_msg_link(np)) {
                        printk(KERN_INFO
                                "%s: Autonegotiation advertising"
                                " %#04x  partner %#04x.\n", dev->name,
                                np->advertising, lpa);
                }

                /* read MII int status to clear the flag */
                readw(ioaddr + MIntrStatus);
                check_link(dev);
        }
        if (intr_status & StatsMax) {
                __get_stats(dev);
        }
        if (intr_status & IntrTxUnderrun) {
                /* bump the TX drain threshold by one step until the
                 * limit is reached, then just report */
                if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
                        np->tx_config += TX_DRTH_VAL_INC;
                        if (netif_msg_tx_err(np))
                                printk(KERN_NOTICE
                                        "%s: increased tx threshold, txcfg %#08x.\n",
                                        dev->name, np->tx_config);
                } else {
                        if (netif_msg_tx_err(np))
                                printk(KERN_NOTICE
                                        "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
                                        dev->name, np->tx_config);
                }
                writel(np->tx_config, ioaddr + TxConfig);
        }
        if (intr_status & WOLPkt && netif_msg_wol(np)) {
                int wol_status = readl(ioaddr + WOLCmd);
                printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
                        dev->name, wol_status);
        }
        if (intr_status & RxStatusFIFOOver) {
                if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
                        printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
                                dev->name);
                }
                np->stats.rx_fifo_errors++;
        }
        /* Hmmmmm, it's not clear how to recover from PCI faults. */
        if (intr_status & IntrPCIErr) {
                printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
                        intr_status & IntrPCIErr);
                np->stats.tx_fifo_errors++;
                np->stats.rx_fifo_errors++;
        }
        spin_unlock(&np->lock);
}
/*
 * __get_stats - fold the chip's dropped-frame counters into np->stats.
 * Values are accumulated (+=), never assigned.  NOTE(review): this
 * pattern suggests the hardware counters are clear-on-read - confirm
 * against the DP8381x datasheet.  Callers hold np->lock.
 */
static void __get_stats(struct net_device *dev)
{
        void __iomem * ioaddr = ns_ioaddr(dev);
        struct netdev_private *np = netdev_priv(dev);

        /* The chip only need report frame silently dropped. */
        np->stats.rx_crc_errors        += readl(ioaddr + RxCRCErrs);
        np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}
/*
 * get_stats - net_device stats hook.  Refreshes the hardware counters
 * (under np->lock, and only while the device is running and the
 * hardware is not in hands_off state) and returns the software copy.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        /* The chip only need report frame silently dropped. */
        spin_lock_irq(&np->lock);
        if (netif_running(dev) && !np->hands_off)
                __get_stats(dev);
        spin_unlock_irq(&np->lock);

        return &np->stats;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netpoll clients (e.g. netconsole) to
 * drive the device with interrupts disabled.  Runs the normal interrupt
 * handler with the device's IRQ line masked to avoid re-entry.
 */
static void natsemi_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        intr_handler(dev->irq, dev, NULL);
        enable_irq(dev->irq);
}
#endif
  2183. #define HASH_TABLE 0x200
  2184. static void __set_rx_mode(struct net_device *dev)
  2185. {
  2186. void __iomem * ioaddr = ns_ioaddr(dev);
  2187. struct netdev_private *np = netdev_priv(dev);
  2188. u8 mc_filter[64]; /* Multicast hash filter */
  2189. u32 rx_mode;
  2190. if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
  2191. /* Unconditionally log net taps. */
  2192. printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
  2193. dev->name);
  2194. rx_mode = RxFilterEnable | AcceptBroadcast
  2195. | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
  2196. } else if ((dev->mc_count > multicast_filter_limit)
  2197. || (dev->flags & IFF_ALLMULTI)) {
  2198. rx_mode = RxFilterEnable | AcceptBroadcast
  2199. | AcceptAllMulticast | AcceptMyPhys;
  2200. } else {
  2201. struct dev_mc_list *mclist;
  2202. int i;
  2203. memset(mc_filter, 0, sizeof(mc_filter));
  2204. for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
  2205. i++, mclist = mclist->next) {
  2206. int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
  2207. mc_filter[i/8] |= (1 << (i & 0x07));
  2208. }
  2209. rx_mode = RxFilterEnable | AcceptBroadcast
  2210. | AcceptMulticast | AcceptMyPhys;
  2211. for (i = 0; i < 64; i += 2) {
  2212. writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
  2213. writel((mc_filter[i + 1] << 8) + mc_filter[i],
  2214. ioaddr + RxFilterData);
  2215. }
  2216. }
  2217. writel(rx_mode, ioaddr + RxFilterAddr);
  2218. np->cur_rx_mode = rx_mode;
  2219. }
/*
 * Change the MTU.  If the interface is up, the rx/tx engines must be
 * stopped, the rx queue drained and the rx buffers re-sized before the
 * engines are restarted - the order of these steps is significant.
 * Returns 0 or -EINVAL for out-of-range values.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
                return -EINVAL;

        dev->mtu = new_mtu;

        /* synchronized against open : rtnl_lock() held by caller */
        if (netif_running(dev)) {
                struct netdev_private *np = netdev_priv(dev);
                void __iomem * ioaddr = ns_ioaddr(dev);

                disable_irq(dev->irq);
                spin_lock(&np->lock);
                /* stop engines */
                natsemi_stop_rxtx(dev);
                /* drain rx queue */
                drain_rx(dev);
                /* change buffers */
                set_bufsize(dev);
                reinit_rx(dev);
                writel(np->ring_dma, ioaddr + RxRingPtr);
                /* restart engines */
                writel(RxOn | TxOn, ioaddr + ChipCmd);
                spin_unlock(&np->lock);
                enable_irq(dev->irq);
        }
        return 0;
}
  2246. static void set_rx_mode(struct net_device *dev)
  2247. {
  2248. struct netdev_private *np = netdev_priv(dev);
  2249. spin_lock_irq(&np->lock);
  2250. if (!np->hands_off)
  2251. __set_rx_mode(dev);
  2252. spin_unlock_irq(&np->lock);
  2253. }
  2254. static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  2255. {
  2256. struct netdev_private *np = netdev_priv(dev);
  2257. strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
  2258. strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
  2259. strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
  2260. }
/* ethtool: size in bytes of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
        return NATSEMI_REGS_SIZE;
}
/* ethtool: size in bytes of the on-board EEPROM (probed at init). */
static int get_eeprom_len(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return np->eeprom_size;
}
/* ethtool GSET: fetch the current link settings under the driver lock. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock_irq(&np->lock);
        netdev_get_ecmd(dev, ecmd);
        spin_unlock_irq(&np->lock);
        return 0;
}
/* ethtool SSET: apply new link settings under the driver lock.
 * Returns 0 or a negative errno from netdev_set_ecmd(). */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int res;

        spin_lock_irq(&np->lock);
        res = netdev_set_ecmd(dev, ecmd);
        spin_unlock_irq(&np->lock);
        return res;
}
/* ethtool: report supported and currently armed wake-on-lan options
 * plus the SecureOn password, all under the driver lock. */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct netdev_private *np = netdev_priv(dev);

        spin_lock_irq(&np->lock);
        netdev_get_wol(dev, &wol->supported, &wol->wolopts);
        netdev_get_sopass(dev, wol->sopass);
        spin_unlock_irq(&np->lock);
}
/* ethtool: arm the requested wake-on-lan options and store the
 * SecureOn password.  The return value comes from the sopass update;
 * netdev_set_wol()'s result is discarded (it always returns 0). */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct netdev_private *np = netdev_priv(dev);
        int res;

        spin_lock_irq(&np->lock);
        netdev_set_wol(dev, wol->wolopts);
        res = netdev_set_sopass(dev, wol->sopass);
        spin_unlock_irq(&np->lock);
        return res;
}
/* ethtool: snapshot the chip registers into buf under the driver lock. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
        struct netdev_private *np = netdev_priv(dev);

        regs->version = NATSEMI_REGS_VER;
        spin_lock_irq(&np->lock);
        netdev_get_regs(dev, buf);
        spin_unlock_irq(&np->lock);
}
/* ethtool: return the current debug message bitmap (NETIF_MSG_*). */
static u32 get_msglevel(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return np->msg_enable;
}
/* ethtool: replace the debug message bitmap (NETIF_MSG_*). */
static void set_msglevel(struct net_device *dev, u32 val)
{
        struct netdev_private *np = netdev_priv(dev);
        np->msg_enable = val;
}
  2323. static int nway_reset(struct net_device *dev)
  2324. {
  2325. int tmp;
  2326. int r = -EINVAL;
  2327. /* if autoneg is off, it's an error */
  2328. tmp = mdio_read(dev, MII_BMCR);
  2329. if (tmp & BMCR_ANENABLE) {
  2330. tmp |= (BMCR_ANRESTART);
  2331. mdio_write(dev, MII_BMCR, tmp);
  2332. r = 0;
  2333. }
  2334. return r;
  2335. }
  2336. static u32 get_link(struct net_device *dev)
  2337. {
  2338. /* LSTATUS is latched low until a read - so read twice */
  2339. mdio_read(dev, MII_BMSR);
  2340. return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
  2341. }
  2342. static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
  2343. {
  2344. struct netdev_private *np = netdev_priv(dev);
  2345. u8 *eebuf;
  2346. int res;
  2347. eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
  2348. if (!eebuf)
  2349. return -ENOMEM;
  2350. eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
  2351. spin_lock_irq(&np->lock);
  2352. res = netdev_get_eeprom(dev, eebuf);
  2353. spin_unlock_irq(&np->lock);
  2354. if (!res)
  2355. memcpy(data, eebuf+eeprom->offset, eeprom->len);
  2356. kfree(eebuf);
  2357. return res;
  2358. }
/* ethtool operation table for this driver. */
static struct ethtool_ops ethtool_ops = {
        .get_drvinfo = get_drvinfo,
        .get_regs_len = get_regs_len,
        .get_eeprom_len = get_eeprom_len,
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_wol = get_wol,
        .set_wol = set_wol,
        .get_regs = get_regs,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .nway_reset = nway_reset,
        .get_link = get_link,
        .get_eeprom = get_eeprom,
};
  2374. static int netdev_set_wol(struct net_device *dev, u32 newval)
  2375. {
  2376. struct netdev_private *np = netdev_priv(dev);
  2377. void __iomem * ioaddr = ns_ioaddr(dev);
  2378. u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
  2379. /* translate to bitmasks this chip understands */
  2380. if (newval & WAKE_PHY)
  2381. data |= WakePhy;
  2382. if (newval & WAKE_UCAST)
  2383. data |= WakeUnicast;
  2384. if (newval & WAKE_MCAST)
  2385. data |= WakeMulticast;
  2386. if (newval & WAKE_BCAST)
  2387. data |= WakeBroadcast;
  2388. if (newval & WAKE_ARP)
  2389. data |= WakeArp;
  2390. if (newval & WAKE_MAGIC)
  2391. data |= WakeMagic;
  2392. if (np->srr >= SRR_DP83815_D) {
  2393. if (newval & WAKE_MAGICSECURE) {
  2394. data |= WakeMagicSecure;
  2395. }
  2396. }
  2397. writel(data, ioaddr + WOLCmd);
  2398. return 0;
  2399. }
  2400. static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
  2401. {
  2402. struct netdev_private *np = netdev_priv(dev);
  2403. void __iomem * ioaddr = ns_ioaddr(dev);
  2404. u32 regval = readl(ioaddr + WOLCmd);
  2405. *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
  2406. | WAKE_ARP | WAKE_MAGIC);
  2407. if (np->srr >= SRR_DP83815_D) {
  2408. /* SOPASS works on revD and higher */
  2409. *supported |= WAKE_MAGICSECURE;
  2410. }
  2411. *cur = 0;
  2412. /* translate from chip bitmasks */
  2413. if (regval & WakePhy)
  2414. *cur |= WAKE_PHY;
  2415. if (regval & WakeUnicast)
  2416. *cur |= WAKE_UCAST;
  2417. if (regval & WakeMulticast)
  2418. *cur |= WAKE_MCAST;
  2419. if (regval & WakeBroadcast)
  2420. *cur |= WAKE_BCAST;
  2421. if (regval & WakeArp)
  2422. *cur |= WAKE_ARP;
  2423. if (regval & WakeMagic)
  2424. *cur |= WAKE_MAGIC;
  2425. if (regval & WakeMagicSecure) {
  2426. /* this can be on in revC, but it's broken */
  2427. *cur |= WAKE_MAGICSECURE;
  2428. }
  2429. return 0;
  2430. }
/*
 * Store the SecureOn password (three 16-bit words) into the chip.
 * Silently a no-op on chips older than rev D, where SOPASS does not
 * work.  NOTE(review): the u16 cast assumes newval is 2-byte aligned
 * and in bus byte order - confirm against callers.
 * The write sequence (disable filter, write, re-enable) is required
 * by the hardware; do not reorder.
 */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        u16 *sval = (u16 *)newval;
        u32 addr;

        if (np->srr < SRR_DP83815_D) {
                return 0;
        }

        /* enable writing to these registers by disabling the RX filter */
        addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
        addr &= ~RxFilterEnable;
        writel(addr, ioaddr + RxFilterAddr);

        /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
        writel(addr | 0xa, ioaddr + RxFilterAddr);
        writew(sval[0], ioaddr + RxFilterData);
        writel(addr | 0xc, ioaddr + RxFilterAddr);
        writew(sval[1], ioaddr + RxFilterData);
        writel(addr | 0xe, ioaddr + RxFilterAddr);
        writew(sval[2], ioaddr + RxFilterData);

        /* re-enable the RX filter */
        writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
        return 0;
}
/*
 * Read the SecureOn password back from the chip into data (three
 * 16-bit words).  Reports all-zero on chips older than rev D, where
 * SOPASS is not functional.  The original RFCR address is restored
 * afterwards so the receive filter state is unchanged.
 */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
        u16 *sval = (u16 *)data;
        u32 addr;

        if (np->srr < SRR_DP83815_D) {
                sval[0] = sval[1] = sval[2] = 0;
                return 0;
        }

        /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
        addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

        writel(addr | 0xa, ioaddr + RxFilterAddr);
        sval[0] = readw(ioaddr + RxFilterData);

        writel(addr | 0xc, ioaddr + RxFilterAddr);
        sval[1] = readw(ioaddr + RxFilterData);

        writel(addr | 0xe, ioaddr + RxFilterAddr);
        sval[2] = readw(ioaddr + RxFilterData);

        writel(addr, ioaddr + RxFilterAddr);
        return 0;
}
/*
 * Fill *ecmd for ethtool GSET from driver state.  When autoneg is
 * enabled, the active speed/duplex is derived from the link partner
 * ability register instead of the configured values.  Caller holds
 * np->lock.  Always returns 0.
 */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct netdev_private *np = netdev_priv(dev);
        u32 tmp;

        ecmd->port = dev->if_port;
        ecmd->speed = np->speed;
        ecmd->duplex = np->duplex;
        ecmd->autoneg = np->autoneg;
        /* translate the cached MII advertisement into ethtool flags */
        ecmd->advertising = 0;
        if (np->advertising & ADVERTISE_10HALF)
                ecmd->advertising |= ADVERTISED_10baseT_Half;
        if (np->advertising & ADVERTISE_10FULL)
                ecmd->advertising |= ADVERTISED_10baseT_Full;
        if (np->advertising & ADVERTISE_100HALF)
                ecmd->advertising |= ADVERTISED_100baseT_Half;
        if (np->advertising & ADVERTISE_100FULL)
                ecmd->advertising |= ADVERTISED_100baseT_Full;
        ecmd->supported = (SUPPORTED_Autoneg |
                SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
        ecmd->phy_address = np->phy_addr_external;
        /*
         * We intentionally report the phy address of the external
         * phy, even if the internal phy is used. This is necessary
         * to work around a deficiency of the ethtool interface:
         * It's only possible to query the settings of the active
         * port. Therefore
         * # ethtool -s ethX port mii
         * actually sends an ioctl to switch to port mii with the
         * settings that are used for the current active port.
         * If we would report a different phy address in this
         * command, then
         * # ethtool -s ethX port tp;ethtool -s ethX port mii
         * would unintentionally change the phy address.
         *
         * Fortunately the phy address doesn't matter with the
         * internal phy...
         */
        /* set information based on active port type */
        switch (ecmd->port) {
        default:
        case PORT_TP:
                ecmd->advertising |= ADVERTISED_TP;
                ecmd->transceiver = XCVR_INTERNAL;
                break;
        case PORT_MII:
                ecmd->advertising |= ADVERTISED_MII;
                ecmd->transceiver = XCVR_EXTERNAL;
                break;
        case PORT_FIBRE:
                ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->transceiver = XCVR_EXTERNAL;
                break;
        }

        /* if autonegotiation is on, try to return the active speed/duplex */
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                ecmd->advertising |= ADVERTISED_Autoneg;
                /* highest common ability of us and the link partner */
                tmp = mii_nway_result(
                        np->advertising & mdio_read(dev, MII_LPA));
                if (tmp == LPA_100FULL || tmp == LPA_100HALF)
                        ecmd->speed = SPEED_100;
                else
                        ecmd->speed = SPEED_10;
                if (tmp == LPA_100FULL || tmp == LPA_10FULL)
                        ecmd->duplex = DUPLEX_FULL;
                else
                        ecmd->duplex = DUPLEX_HALF;
        }

        /* ignore maxtxpkt, maxrxpkt for now */
        return 0;
}
/*
 * Apply ethtool SSET: validate the request, store the new autoneg /
 * speed / duplex / port parameters, switch to the requested phy and
 * re-run the phy fixup so the hardware matches.  Caller holds
 * np->lock.  Returns 0 or -EINVAL.
 */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct netdev_private *np = netdev_priv(dev);

        /* validate: only known ports/transceivers are accepted */
        if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
                return -EINVAL;
        if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
                return -EINVAL;
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                /* autoneg needs at least one mode to advertise */
                if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
                                          ADVERTISED_10baseT_Full |
                                          ADVERTISED_100baseT_Half |
                                          ADVERTISED_100baseT_Full)) == 0) {
                        return -EINVAL;
                }
        } else if (ecmd->autoneg == AUTONEG_DISABLE) {
                /* forced mode needs a valid speed and duplex */
                if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
                        return -EINVAL;
                if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        /*
         * maxtxpkt, maxrxpkt: ignored for now.
         *
         * transceiver:
         * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
         * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
         * selects based on ecmd->port.
         *
         * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
         * phys that are connected to the mii bus. It's used to apply fibre
         * specific updates.
         */

        /* WHEW! now lets bang some bits */

        /* save the parms */
        dev->if_port = ecmd->port;
        np->autoneg = ecmd->autoneg;
        np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
        if (np->autoneg == AUTONEG_ENABLE) {
                /* advertise only what has been requested */
                np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
                if (ecmd->advertising & ADVERTISED_10baseT_Half)
                        np->advertising |= ADVERTISE_10HALF;
                if (ecmd->advertising & ADVERTISED_10baseT_Full)
                        np->advertising |= ADVERTISE_10FULL;
                if (ecmd->advertising & ADVERTISED_100baseT_Half)
                        np->advertising |= ADVERTISE_100HALF;
                if (ecmd->advertising & ADVERTISED_100baseT_Full)
                        np->advertising |= ADVERTISE_100FULL;
        } else {
                np->speed = ecmd->speed;
                np->duplex = ecmd->duplex;
                /* user overriding the initial full duplex parm? */
                if (np->duplex == DUPLEX_HALF)
                        np->full_duplex = 0;
        }

        /* get the right phy enabled */
        if (ecmd->port == PORT_TP)
                switch_port_internal(dev);
        else
                switch_port_external(dev);

        /* set parms and see how this affected our link status */
        init_phy_fixup(dev);
        check_link(dev);
        return 0;
}
/*
 * Snapshot the chip registers for ethtool -d.  Buffer layout: page-0
 * operational registers, then the current MII registers, the four
 * 'magic' page-1 registers, and finally the RFCR-indexed filter RAM.
 * Note that 'i' carries the output index across all sections.
 * Caller holds np->lock.
 */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
        int i;
        int j;
        u32 rfcr;
        u32 *rbuf = (u32 *)buf;
        void __iomem * ioaddr = ns_ioaddr(dev);

        /* read non-mii page 0 of registers */
        for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
                rbuf[i] = readl(ioaddr + i*4);
        }

        /* read current mii registers */
        for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
                rbuf[i] = mdio_read(dev, i & 0x1f);

        /* read only the 'magic' registers from page 1 */
        writew(1, ioaddr + PGSEL);
        rbuf[i++] = readw(ioaddr + PMDCSR);
        rbuf[i++] = readw(ioaddr + TSTDAT);
        rbuf[i++] = readw(ioaddr + DSPCFG);
        rbuf[i++] = readw(ioaddr + SDCFG);
        writew(0, ioaddr + PGSEL);

        /* read RFCR indexed registers */
        rfcr = readl(ioaddr + RxFilterAddr);
        for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
                writel(j*2, ioaddr + RxFilterAddr);
                rbuf[i++] = readw(ioaddr + RxFilterData);
        }
        writel(rfcr, ioaddr + RxFilterAddr);

        /* the interrupt status is clear-on-read - see if we missed any */
        if (rbuf[4] & rbuf[5]) {
                printk(KERN_WARNING
                        "%s: shoot, we dropped an interrupt (%#08x)\n",
                        dev->name, rbuf[4] & rbuf[5]);
        }
        return 0;
}
  2651. #define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
  2652. | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
  2653. | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
  2654. | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
  2655. | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
  2656. | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
  2657. | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
  2658. | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
  2659. static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
  2660. {
  2661. int i;
  2662. u16 *ebuf = (u16 *)buf;
  2663. void __iomem * ioaddr = ns_ioaddr(dev);
  2664. struct netdev_private *np = netdev_priv(dev);
  2665. /* eeprom_read reads 16 bits, and indexes by 16 bits */
  2666. for (i = 0; i < np->eeprom_size/2; i++) {
  2667. ebuf[i] = eeprom_read(ioaddr, i);
  2668. /* The EEPROM itself stores data bit-swapped, but eeprom_read
  2669. * reads it back "sanely". So we swap it back here in order to
  2670. * present it to userland as it is stored. */
  2671. ebuf[i] = SWAP_BITS(ebuf[i]);
  2672. }
  2673. return 0;
  2674. }
/*
 * Private/MII ioctls.  The phy_id alone cannot distinguish the internal
 * phy from an external one, so register accesses are routed to the mii
 * bus of the currently active port (dev->if_port).
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct mii_ioctl_data *data = if_mii(rq);
        struct netdev_private *np = netdev_priv(dev);

        switch(cmd) {
        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
        case SIOCDEVPRIVATE:            /* for binary compat, remove in 2.5 */
                data->phy_id = np->phy_addr_external;
                /* Fall Through */

        case SIOCGMIIREG:               /* Read MII PHY register. */
        case SIOCDEVPRIVATE+1:          /* for binary compat, remove in 2.5 */
                /* The phy_id is not enough to uniquely identify
                 * the intended target. Therefore the command is sent to
                 * the given mii on the current port.
                 */
                if (dev->if_port == PORT_TP) {
                        if ((data->phy_id & 0x1f) == np->phy_addr_external)
                                data->val_out = mdio_read(dev,
                                                        data->reg_num & 0x1f);
                        else
                                data->val_out = 0;
                } else {
                        move_int_phy(dev, data->phy_id & 0x1f);
                        data->val_out = miiport_read(dev, data->phy_id & 0x1f,
                                                        data->reg_num & 0x1f);
                }
                return 0;

        case SIOCSMIIREG:               /* Write MII PHY register. */
        case SIOCDEVPRIVATE+2:          /* for binary compat, remove in 2.5 */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (dev->if_port == PORT_TP) {
                        if ((data->phy_id & 0x1f) == np->phy_addr_external) {
                                /* mirror MII_ADVERTISE writes so the
                                 * driver's cached copy stays in sync */
                                if ((data->reg_num & 0x1f) == MII_ADVERTISE)
                                        np->advertising = data->val_in;
                                mdio_write(dev, data->reg_num & 0x1f,
                                                        data->val_in);
                        }
                } else {
                        if ((data->phy_id & 0x1f) == np->phy_addr_external) {
                                if ((data->reg_num & 0x1f) == MII_ADVERTISE)
                                        np->advertising = data->val_in;
                        }
                        move_int_phy(dev, data->phy_id & 0x1f);
                        miiport_write(dev, data->phy_id & 0x1f,
                                                data->reg_num & 0x1f,
                                                data->val_in);
                }
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
/*
 * Put the chip into wake-on-lan mode.  The rx/tx engines must already
 * be stopped before this is called.  If enable_intr is set, WOL and
 * link-change interrupts stay armed so a wake event can be reported.
 */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
        void __iomem * ioaddr = ns_ioaddr(dev);
        struct netdev_private *np = netdev_priv(dev);

        if (netif_msg_wol(np))
                printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
                        dev->name);

        /* For WOL we must restart the rx process in silent mode.
         * Write NULL to the RxRingPtr. Only possible if
         * rx process is stopped
         */
        writel(0, ioaddr + RxRingPtr);

        /* read WoL status to clear */
        readl(ioaddr + WOLCmd);

        /* PME on, clear status */
        writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

        /* and restart the rx process */
        writel(RxOn, ioaddr + ChipCmd);

        if (enable_intr) {
                /* enable the WOL interrupt.
                 * Could be used to send a netlink message.
                 */
                writel(WOLPkt | LinkChange, ioaddr + IntrMask);
                writel(1, ioaddr + IntrEnable);
        }
}
/*
 * Orderly interface shutdown: stop the timer and IRQ, freeze the chip
 * and its statistics, tear down the rings, then either arm WOL mode or
 * restore the saved ClkRun/PME state.  rtnl_lock is held by the caller.
 */
static int netdev_close(struct net_device *dev)
{
        void __iomem * ioaddr = ns_ioaddr(dev);
        struct netdev_private *np = netdev_priv(dev);

        if (netif_msg_ifdown(np))
                printk(KERN_DEBUG
                        "%s: Shutting down ethercard, status was %#04x.\n",
                        dev->name, (int)readl(ioaddr + ChipCmd));
        if (netif_msg_pktdata(np))
                printk(KERN_DEBUG
                        "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
                        dev->name, np->cur_tx, np->dirty_tx,
                        np->cur_rx, np->dirty_rx);

        /*
         * FIXME: what if someone tries to close a device
         * that is suspended?
         * Should we reenable the nic to switch to
         * the final WOL settings?
         */

        del_timer_sync(&np->timer);
        /* hands_off is set under the lock with the IRQ masked so the
         * interrupt handler cannot observe a half-disabled state */
        disable_irq(dev->irq);
        spin_lock_irq(&np->lock);
        natsemi_irq_disable(dev);
        np->hands_off = 1;
        spin_unlock_irq(&np->lock);
        enable_irq(dev->irq);

        free_irq(dev->irq, dev);

        /* Interrupt disabled, interrupt handler released,
         * queue stopped, timer deleted, rtnl_lock held
         * All async codepaths that access the driver are disabled.
         */
        spin_lock_irq(&np->lock);
        np->hands_off = 0;
        readl(ioaddr + IntrMask);
        readw(ioaddr + MIntrStatus);

        /* Freeze Stats */
        writel(StatsFreeze, ioaddr + StatsCtrl);

        /* Stop the chip's Tx and Rx processes. */
        natsemi_stop_rxtx(dev);

        __get_stats(dev);
        spin_unlock_irq(&np->lock);

        /* clear the carrier last - an interrupt could reenable it otherwise */
        netif_carrier_off(dev);
        netif_stop_queue(dev);

        dump_ring(dev);
        drain_ring(dev);
        free_ring(dev);

        {
                u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
                if (wol) {
                        /* restart the NIC in WOL mode.
                         * The nic must be stopped for this.
                         */
                        enable_wol_mode(dev, 0);
                } else {
                        /* Restore PME enable bit unmolested */
                        writel(np->SavedClkRun, ioaddr + ClkRun);
                }
        }
        return 0;
}
/* PCI remove hook: tear down in reverse order of probe; the netdev is
 * unregistered first so no new opens can race the teardown. */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        unregister_netdev (dev);
        pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
        pci_set_drvdata(pdev, NULL);
}
  2825. #ifdef CONFIG_PM
  2826. /*
  2827. * The ns83815 chip doesn't have explicit RxStop bits.
  2828. * Kicking the Rx or Tx process for a new packet reenables the Rx process
  2829. * of the nic, thus this function must be very careful:
  2830. *
  2831. * suspend/resume synchronization:
  2832. * entry points:
  2833. * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
  2834. * start_tx, tx_timeout
  2835. *
  2836. * No function accesses the hardware without checking np->hands_off.
  2837. * the check occurs under spin_lock_irq(&np->lock);
  2838. * exceptions:
  2839. * * netdev_ioctl: noncritical access.
  2840. * * netdev_open: cannot happen due to the device_detach
  2841. * * netdev_close: doesn't hurt.
  2842. * * netdev_timer: timer stopped by natsemi_suspend.
  2843. * * intr_handler: doesn't acquire the spinlock. suspend calls
  2844. * disable_irq() to enforce synchronization.
  2845. * * natsemi_poll: checks before reenabling interrupts. suspend
  2846. * sets hands_off, disables interrupts and then waits with
  2847. * netif_poll_disable().
  2848. *
  2849. * Interrupts must be disabled, otherwise hands_off can cause irq storms.
  2850. */
/*
 * Suspend hook - see the synchronization notes above.  Sets hands_off
 * so every other entry point backs away from the hardware, stops the
 * engines, and leaves the chip in WOL mode if any wake option is armed.
 */
static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata (pdev);
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);

        rtnl_lock();
        if (netif_running (dev)) {
                del_timer_sync(&np->timer);

                disable_irq(dev->irq);
                spin_lock_irq(&np->lock);

                writel(0, ioaddr + IntrEnable);
                np->hands_off = 1;
                natsemi_stop_rxtx(dev);
                netif_stop_queue(dev);

                spin_unlock_irq(&np->lock);
                enable_irq(dev->irq);

                netif_poll_disable(dev);

                /* Update the error counts. */
                __get_stats(dev);

                /* pci_power_off(pdev, -1); */
                drain_ring(dev);
                {
                        u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
                        /* Restore PME enable bit */
                        if (wol) {
                                /* restart the NIC in WOL mode.
                                 * The nic must be stopped for this.
                                 * FIXME: use the WOL interrupt
                                 */
                                enable_wol_mode(dev, 0);
                        } else {
                                /* Restore PME enable bit unmolested */
                                writel(np->SavedClkRun, ioaddr + ClkRun);
                        }
                }
        }
        netif_device_detach(dev);
        rtnl_unlock();
        return 0;
}
  2891. static int natsemi_resume (struct pci_dev *pdev)
  2892. {
  2893. struct net_device *dev = pci_get_drvdata (pdev);
  2894. struct netdev_private *np = netdev_priv(dev);
  2895. rtnl_lock();
  2896. if (netif_device_present(dev))
  2897. goto out;
  2898. if (netif_running(dev)) {
  2899. BUG_ON(!np->hands_off);
  2900. pci_enable_device(pdev);
  2901. /* pci_power_on(pdev); */
  2902. natsemi_reset(dev);
  2903. init_ring(dev);
  2904. disable_irq(dev->irq);
  2905. spin_lock_irq(&np->lock);
  2906. np->hands_off = 0;
  2907. init_registers(dev);
  2908. netif_device_attach(dev);
  2909. spin_unlock_irq(&np->lock);
  2910. enable_irq(dev->irq);
  2911. mod_timer(&np->timer, jiffies + 1*HZ);
  2912. }
  2913. netif_device_attach(dev);
  2914. netif_poll_enable(dev);
  2915. out:
  2916. rtnl_unlock();
  2917. return 0;
  2918. }
  2919. #endif /* CONFIG_PM */
/* PCI driver glue; the power-management hooks exist only with CONFIG_PM. */
static struct pci_driver natsemi_driver = {
        .name = DRV_NAME,
        .id_table = natsemi_pci_tbl,
        .probe = natsemi_probe1,
        .remove = __devexit_p(natsemi_remove1),
#ifdef CONFIG_PM
        .suspend = natsemi_suspend,
        .resume = natsemi_resume,
#endif
};
/* Module entry point: announce the driver (when built as a module) and
 * register it with the PCI core. */
static int __init natsemi_init_mod (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
        printk(version);
#endif

        return pci_module_init (&natsemi_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit natsemi_exit_mod (void)
{
        pci_unregister_driver (&natsemi_driver);
}

module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);