forcedeth.c

/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *	Check all PCI BARs for the register window.
 *	udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *	irq mask updated
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *	added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *	requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *	MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *	Set link speed correctly. start rx before starting
 *	tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *	open.
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *	increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *	the tx length.
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *	addresses, really stop rx if already running
 *	in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *	on close.
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *	added CK804/MCP04 device IDs, code fixes
 *	for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *	into nv_close, otherwise reenabling for wol can
 *	cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 *	capabilities.
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *	per-packet flags.
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *	of nv_remove
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *	in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.56"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif
/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0040
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
	NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
	NVREG_IRQ_TX_FORCED))
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK 0x00ff
#define NVREG_RNDSEED_FORCE 0x7f00
#define NVREG_RNDSEED_FORCE2 0x2d00
#define NVREG_RNDSEED_FORCE3 0x7400
	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL 0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,
	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1 0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x02100
#define NVREG_TXRXCTL_DESC_3 0x02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK 0x000f
#define NVREG_MIISTAT_MASK2 0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL 8
	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 TxVlan;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
} ring_type;
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
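/*
 * The buffer length and the status flags below share the single 32-bit
 * FlagLen word of a descriptor: v1 descriptors keep the flags in the upper
 * 16 bits (FLAG_MASK_V1) with a 16-bit length beneath, while v2/v3 widen
 * the flag field to 18 bits (FLAG_MASK_V2), leaving a 14-bit length field,
 * which is also why NV_TX2_TSO_MAX_SHIFT below is 14.
 */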
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUMOK1 (0x10000000)
#define NV_RX2_CHECKSUMOK2 (0x14000000)
#define NV_RX2_CHECKSUMOK3 (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 128
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx ring.
 */
#define TX_LIMIT_DIFFERENCE 1

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHY_INIT1 0x0f000
#define PHY_INIT2 0x0e00
#define PHY_INIT3 0x01000
#define PHY_INIT4 0x0200
#define PHY_INIT5 0x0004
#define PHY_INIT6 0x02000
#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "tx_pause" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_bytes" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
	{ "rx_packets" },
	{ "rx_errors_total" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 tx_deferral;
	u64 tx_packets;
	u64 tx_pause;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_bytes;
	u64 rx_pause;
	u64 rx_drop_frame;
	u64 rx_packets;
	u64 rx_errors_total;
};

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	u32 reg;
	u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegUnknownSetupReg3, 0x0ff },
	{ 0,0 }
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;
	int intr_test;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;

	void __iomem *base;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff **rx_skbuff;
	dma_addr_t *rx_dma;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff **tx_skbuff;
	dma_addr_t *tx_dma;
	unsigned int *tx_dma_len;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit_start;
	int tx_limit_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
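/*
 * Sanity check of the formula above: a 1 ms (1000 us) interval gives
 * (1000 * 100) / 2^10 ~= 97, matching the NVREG_POLL_DEFAULT=97 example at
 * NvRegPollingInterval; likewise NVREG_POLL_DEFAULT_THROUGHPUT (970) works
 * out to roughly 10 ms, i.e. the 100 timer interrupts per second mentioned
 * under "Known bugs".
 */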
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
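/*
 * PCI writes are posted: writel() can return before the device has seen
 * the write. Reading any register on the same device (here the first one
 * in the BAR) forces the pending writes to be flushed, so callers issue
 * writel(...); pci_push(base); wherever ordering against the hardware
 * matters, as in nv_start_rx() and nv_txrx_reset() below.
 */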
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
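/*
 * reg_delay() polls the register at `offset` every `delay` microseconds
 * until the bits selected by `mask` read back as `target`, giving up after
 * `delaymax` microseconds. It returns 0 on success and 1 on timeout
 * (optionally printing `msg`); see nv_stop_rx()/nv_stop_tx() below for
 * typical use, waiting for a BUSY status bit to clear.
 */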
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
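/*
 * Both rings live in a single consistent DMA allocation: the rx
 * descriptors start at np->ring_addr and the tx descriptors follow
 * immediately after the rx_ring_size entries, which is why the tx base
 * above is computed as ring_addr + rx_ring_size * sizeof(descriptor).
 * free_rings() below releases that one allocation.
 */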
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skbuff)
		kfree(np->rx_skbuff);
	if (np->rx_dma)
		kfree(np->rx_dma);
	if (np->tx_skbuff)
		kfree(np->tx_skbuff);
	if (np->tx_dma)
		kfree(np->tx_dma);
	if (np->tx_dma_len)
		kfree(np->tx_dma_len);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
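/*
 * In other words: only MSI-X with more than one allocated vector counts as
 * multi-irq operation. Legacy INTx, plain MSI, and single-vector MSI-X all
 * share one interrupt line, so nv_enable_irq()/nv_disable_irq() below
 * toggle either that single irq or the three per-purpose vectors
 * (RX, TX, OTHER).
 */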
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
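/*
 * The XOR behaviour noted above appears to explain the asymmetry in
 * nv_disable_hw_interrupts(): with MSI-X enabled, writing the currently
 * active mask toggles exactly those bits off, whereas in INTx/MSI mode the
 * mask register is plain read/write, so writing 0 disables everything.
 */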
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}
	return retval;
}
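/*
 * Usage sketch for mii_rw(): assembling a PHY's OUI from the
 * MII_PHYSID1/MII_PHYSID2 registers of <linux/mii.h>, presumably how the
 * probe path fills np->phy_oui before phy_init() compares it against
 * PHY_OUI_MARVELL/PHY_OUI_CICADA. Kept under #if 0; illustrative only, not
 * part of the original driver.
 */
#if 0
static u32 nv_read_phy_oui(struct net_device *dev, int phyaddr)
{
	int id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
	int id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);

	if (id1 < 0 || id2 < 0)
		return 0;	/* PHY did not answer */
	id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
	id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
	return id1 | id2;
}
#endif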
static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;
		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
  1005. mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
  1006. /* restart auto negotiation */
  1007. mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  1008. mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
  1009. if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
  1010. return PHY_ERROR;
  1011. }
  1012. return 0;
  1013. }
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Returns 1 if the skb allocations failed and the
 * rx engine is left without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % np->rx_ring_size;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;
			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == np->rx_ring_size)
		return 1;
	return 0;
}
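/*
 * Ring bookkeeping example (illustrative): cur_rx and refill_rx are
 * free-running counters that are only reduced modulo rx_ring_size when
 * a slot is accessed. With rx_ring_size == 128, cur_rx == 260 and
 * refill_rx == 132, the loop above refills slots starting at
 * 132 % 128 == 4 until refill_rx catches up with cur_rx. If allocation
 * fails and cur_rx - refill_rx reaches rx_ring_size, the nic owns no
 * available descriptor at all and 1 is returned so the caller can arm
 * the oom_kick timer.
 */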
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
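/*
 * The timer handler above runs in softirq context, so it keeps the rx
 * vector (or the shared "all" vector / legacy line) disabled for the
 * duration of the refill: nv_alloc_rx() manipulates the same ring state
 * as the rx interrupt path and is not reentrant against it.
 */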
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->rx_ring_size;
	np->refill_rx = 0;
	for (i = 0; i < np->rx_ring_size; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
					 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
	unsigned int start_nr = np->next_tx % np->tx_ring_size;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len - skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % np->tx_ring_size;
		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % np->tx_ring_size;
			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->gso_size)
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}
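/*
 * Descriptor accounting example (illustrative, assuming
 * NV_TX2_TSO_MAX_SIZE == 1 << NV_TX2_TSO_MAX_SHIFT): each chunk of the
 * linear area and of every fragment may cover at most
 * NV_TX2_TSO_MAX_SIZE bytes. With a 16384 byte limit, a 40000 byte
 * linear area needs 40000 >> 14 == 2 full descriptors plus one for the
 * remaining 7232 bytes, i.e. entries == 3 before the fragments are
 * added in the loop at the top of nv_start_xmit().
 */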
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % np->tx_ring_size;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < np->tx_limit_start)
		netif_wake_queue(dev);
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
						i,
						le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
						le32_to_cpu(np->tx_ring.orig[i].FlagLen),
						le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
						le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
						le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
						le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
						le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
						le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
						i,
						le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
						le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
						le32_to_cpu(np->tx_ring.ex[i].FlagLen),
						le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
						le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
						le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
						le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
						le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
						le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
						le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
						le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
						le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
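/*
 * Example (plain Ethernet path, ETH_HLEN == 14): a frame with 46 in the
 * proto/len field and datalen == 64 gives protolen == 46 + 14 == 60, so
 * the 4 trailing padding bytes are trimmed and 60 is returned. A proto
 * value above ETH_DATA_LEN (1500) is an EtherType rather than a length,
 * so no consistency check is possible and datalen is returned unchanged.
 */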
static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	u32 vlanflags = 0;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;

		if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % np->rx_ring_size;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", Flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_ERROR) {
				if (Flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (Flags & NV_RX_FRAMINGERR) {
					if (Flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX2_ERROR) {
				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (Flags & NV_RX2_FRAMINGERR) {
					if (Flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
				Flags &= NV_RX2_CHECKSUMMASK;
				if (Flags == NV_RX2_CHECKSUMOK1 ||
				    Flags == NV_RX2_CHECKSUMOK2 ||
				    Flags == NV_RX2_CHECKSUMOK3) {
					dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
				}
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
		} else {
			netif_rx(skb);
		}
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
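/*
 * Illustrative sizing (assuming NV_RX_HEADERS covers the per-packet
 * header and alignment overhead the hardware needs): with the default
 * MTU of 1500 the rx buffer stays at ETH_DATA_LEN + NV_RX_HEADERS,
 * while a jumbo MTU of 9000 yields rx_buf_sz == 9000 + NV_RX_HEADERS.
 */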
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed; there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);
		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
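/*
 * Byte-order example: for dev_addr 00:11:22:33:44:55 the words written
 * above are NvRegMacAddrA == 0x33221100 and NvRegMacAddrB == 0x00005544,
 * i.e. the hardware expects the station address packed little-endian
 * across the two registers.
 */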
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if ((dev->flags & IFF_ALLMULTI) || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
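/*
 * Filter derivation example (illustrative): the hardware matches an
 * (addr, mask) pair, so only bits that have the same value in every
 * list entry can be constrained. For the two multicast addresses
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:02 the low words differ only in
 * the lowest two bits; those bits end up in neither alwaysOn nor
 * alwaysOff, drop out of the mask, and the nic accepts any frame that
 * matches the remaining constant bits.
 */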
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) {
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				}
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	return retval;
}
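/*
 * The pause switch above is the usual 802.3 flow control resolution,
 * keyed on our advertisement with the link partner's abilities folded
 * in (summary derived from the code above):
 *
 *	local adv		link partner		result
 *	PAUSE_CAP		PAUSE_CAP		rx (+tx if requested)
 *	PAUSE_ASYM		PAUSE_CAP|ASYM		tx only
 *	PAUSE_CAP|ASYM		PAUSE_CAP		rx (+tx if requested)
 *	PAUSE_CAP|ASYM		PAUSE_ASYM		rx only
 */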
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock(&np->lock);
		}

		if (events & NVREG_IRQ_LINK) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irq(&np->lock);
		nv_tx_done(dev);
		spin_unlock_irq(&np->lock);

		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock_irq(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock_irq(&np->lock);
		}

		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irq(&np->lock);
			nv_link_irq(dev);
			spin_unlock_irq(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irq(&np->lock);
			nv_linkchange(dev);
			spin_unlock_irq(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
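/*
 * Mapping example: calling set_msix_vector_map(dev, vector, irqmask)
 * with bit 3 set in irqmask places the 4-bit vector number in bits
 * 12..15 of NvRegMSIXMap0 (msixmap |= vector << (3 << 2)); interrupt
 * bits 8..15 land in NvRegMSIXMap1 in the same nibble-per-interrupt
 * layout.
 */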
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if ((!intr_test &&
				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
				    (intr_test &&
				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
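/*
 * The fallback order above is MSI-X (separate rx/tx/other vectors in
 * throughput mode, one shared vector otherwise), then plain MSI, then
 * the legacy INTx line; ret stays nonzero until one of the enable
 * paths succeeds, so each later branch only runs if the earlier,
 * preferred mechanism was unavailable.
 */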
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic; we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* FIXME: Do we need synchronize_irq(dev->irq) here? */

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		nv_nic_irq(0, dev, NULL);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
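
/*
 * nv_do_stats_poll: accumulate the nic's hardware statistics counters
 * into np->estats and compute the derived totals; re-arms itself every
 * STATS_INTERVAL until shutdown.
 */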
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.tx_deferral += readl(base + NvRegTxDef);
	np->estats.tx_packets += readl(base + NvRegTxFrame);
	np->estats.tx_pause += readl(base + NvRegTxPause);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_bytes += readl(base + NvRegRxCnt);
	np->estats.rx_pause += readl(base + NvRegRxPause);
	np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);

	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
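
/*
 * nv_set_settings: ethtool handler for link configuration. Validates
 * the request, stops the rx/tx engines if the interface is up, rewrites
 * the phy advertisement and BMCR registers for either autoneg or a
 * forced 10/100 mode, then restarts the engines.
 */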
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy */
			if (phy_reset(dev)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else if (netif_running(dev)) {
			/* Wait a bit and then reconfigure the nic. */
			udelay(10);
			nv_linkchange(dev);
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
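
/*
 * ethtool register dump: the whole register window (np->register_size
 * bytes) is read out under np->lock.
 */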
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the buffer holds exactly register_size bytes, so do not run one
	 * u32 past the end */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
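
/*
 * nv_set_ringparam: resize the rx/tx descriptor rings. All new
 * allocations are done up front so that on failure the old rings are
 * left untouched; only then are the engines stopped, the old rings
 * freed and the new ones handed to the hardware.
 */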
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
	rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
	tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
	tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		/* kfree(NULL) is a no-op, so no need to check each pointer */
		kfree(rx_skbuff);
		kfree(rx_dma);
		kfree(tx_skbuff);
		kfree(tx_dma);
		kfree(tx_dma_len);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skbuff = (struct sk_buff**)rx_skbuff;
	np->rx_dma = (dma_addr_t*)rx_dma;
	np->tx_skbuff = (struct sk_buff**)tx_skbuff;
	np->tx_dma = (dma_addr_t*)tx_dma;
	np->tx_dma_len = (unsigned int*)tx_dma_len;
	np->ring_addr = ring_addr;

	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
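
/*
 * nv_set_pauseparam: configure rx/tx flow control. With autoneg the
 * pause bits are negotiated via the phy advertisement; otherwise the
 * requested settings are applied to the nic directly.
 */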
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
		    (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
			/* already set or unset */
			return 0;
		}

		if (data) {
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		} else {
			printk(KERN_INFO "Cannot disable rx checksum if vlan is enabled\n");
			return -EINVAL;
		}

		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_stats_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_STATISTICS)
		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
	else
		return 0;
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
}

static int nv_self_test_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
		return NV_TEST_COUNT_EXTENDED;
	else
		return NV_TEST_COUNT_BASE;
}
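
/*
 * nv_link_test: BMSR_LSTATUS is a latched-low bit per the MII spec, so
 * the first read can return stale state; read twice and report the
 * current link status.
 */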
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
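
/*
 * nv_register_test: walk the nv_registers_test table (terminated by a
 * zero reg offset), toggle the maskable bits of each register, verify
 * that the new value reads back and restore the original value.
 */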
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
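
/*
 * nv_interrupt_test: temporarily swap in the nv_nic_irq_test handler on
 * a single vector, arm the nic's timer interrupt and check that the ISR
 * set np->intr_test. Returns 1 on pass, 2 if no interrupt was seen, and
 * 0 if the irq setup itself failed (the caller must bail out then).
 */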
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
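
/*
 * nv_loopback_test: put the mac into loopback via the packet filter,
 * transmit a single byte-pattern packet and verify that it is received
 * back intact. Returns 1 on pass, 0 on failure.
 */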
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 Flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);
	/* the nic only reads this buffer, so map it to-device; map and
	 * unmap must use the same direction */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (Flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (Flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (Flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skbuff[0];
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 tx_skb->end-tx_skb->data,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);

	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
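
/*
 * nv_self_test: ethtool self-test entry point. Result slots: buffer[0]
 * link, buffer[1] registers, buffer[2] interrupt, buffer[3] loopback.
 * The offline tests stop the nic first and restore it afterwards.
 */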
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_tso = ethtool_op_get_tso,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_stats_count = nv_get_stats_count,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.self_test_count = nv_self_test_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing to do */
}

static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* 1) erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* 2) initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(0, base + NvRegUnknownTransmitterReg);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* 3) set mac address */
	nv_copy_mac_to_hw(dev);

	/* 4) give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	/* 5) continue setup */
	writel(np->linkspeed, base + NvRegLinkSpeed);
	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegUnknownSetupReg4);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	/* 6) continue setup */
	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & DEV_HAS_STATISTICS)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);

	if (np->wolenabled)
		nv_start_rx(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* FIXME: power down nic */

	return 0;
}
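
/*
 * nv_probe: pci probe. Maps the register window, picks the descriptor
 * format from the device flags, allocates the rings, reads the mac
 * address out of the nic and scans the mii bus for a phy before
 * registering the netdev.
 */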
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
					pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
					pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
					pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;
	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
		goto out_freering;
	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);

	dev->open = nv_open;
	dev->stop = nv_close;
	dev->hard_start_xmit = nv_start_xmit;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    revision_id >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	/* reset it */
	phy_init(dev);

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));

	return 0;

out_error:
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{0,},
};
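
/*
 * Editorial sketch (not in the original source): support for an
 * additional board would be a matter of appending one more entry before
 * the terminating all-zero entry above, along these lines — the device
 * ID below is hypothetical:
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
 *	  .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 *	},
 */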

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};

static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	/* pci_module_init() is the historical wrapper around pci_register_driver() */
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
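/*
 * Worked example (editorial arithmetic, assuming the bracketed formula
 * maps a desired period in microseconds to the parameter value): a poll
 * period of roughly 10 ms, i.e. 10000 us, gives
 * (10000 * 100) / 1024 ~= 977.
 */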
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);