/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *                    IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *                    Check all PCI BARs for the register window.
 *                    udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *                    irq mask updated
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *                    added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *                    requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *                    MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *                    Set link speed correctly. start rx before starting
 *                    tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *                    open.
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *                    increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *                    the tx length.
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *                    addresses, really stop rx if already running
 *                    in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *                    on close.
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *                    added CK804/MCP04 device IDs, code fixes
 *                    for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *                    into nv_close, otherwise reenabling for wol can
 *                    cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 *                    capabilities.
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *                    per-packet flags.
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *                    of nv_remove
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *                    in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.56"
#define DRV_NAME		"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS	0x0400	/* device supports hw statistics */
#define DEV_HAS_TEST_EXTENDED	0x0800	/* device supports extended diagnostic test */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0040
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
				   NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
				   NVREG_IRQ_TX_FORCED))
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
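/* Scale check (added note, extrapolating from the 1 ms example above):
 * the throughput default of 970 corresponds to roughly 10 ms, i.e. about
 * 100 timer irqs per second -- the rate quoted under "Known bugs" -- while
 * the cpu-mode default of 13 corresponds to roughly 0.13 ms.
 */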
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c
	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01
	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10
	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01
	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400
	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,
	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8
	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY		5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111
	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx ring.
 */
#define TX_LIMIT_DIFFERENCE	1

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
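/* Note (added for clarity, inferred from the ring setup code below):
 * DESC_VER_1 and DESC_VER_2 use the two-word struct ring_desc via
 * ring_type.orig, while DESC_VER_3 uses the four-word struct ring_desc_ex
 * via ring_type.ex, which carries the high dword of the 64-bit buffer
 * address plus the tx vlan tag.
 */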
/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080
#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "tx_pause" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_bytes" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
	{ "rx_packets" },
	{ "rx_errors_total" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 tx_deferral;
	u64 tx_packets;
	u64 tx_pause;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_bytes;
	u64 rx_pause;
	u64 rx_drop_frame;
	u64 rx_packets;
	u64 rx_errors_total;
};

/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__le32 reg;
	__le32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
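/* Illustration (added, not part of the original comment): code paths
 * outside the irq handler typically wrap their hardware access roughly
 * like this, assuming struct fe_priv *np = netdev_priv(dev):
 *
 *	spin_lock_irq(&np->lock);
 *	... read/write NvReg* registers, update np state ...
 *	spin_unlock_irq(&np->lock);
 */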
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;
	int intr_test;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff **rx_skbuff;
	dma_addr_t *rx_dma;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;

	/*
	 * tx specific fields.
	 */
	union ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff **tx_skbuff;
	dma_addr_t *tx_dma;
	unsigned int *tx_dma_len;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit_start;
	int tx_limit_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
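/* Worked example (added): plugging a 1 ms interval into the formula above
 * gives (1000 * 100) / 1024 = 97, which matches the NVREG_POLL_DEFAULT
 * note in the register enum near the top of the file.
 */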
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
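/* Usage note (added): reg_delay() busy-waits in steps of `delay' usec, for
 * at most `delaymax' usec, until the masked register at `offset' reads as
 * `target'; it returns 1 on timeout and 0 on success. nv_stop_rx() and
 * nv_stop_tx() below use it to wait for the receiver/transmitter to go
 * idle, e.g.:
 *
 *	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 *		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, msg);
 */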
#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
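/* Layout note (added, inferred from setup_hw_rings() and free_rings()):
 * both rings live in one DMA-coherent allocation, rx descriptors first:
 *
 *	ring_addr: [ rx_ring_size descriptors ][ tx_ring_size descriptors ]
 *
 * which is why the tx ring's physical address is ring_addr plus
 * rx_ring_size * sizeof(descriptor).
 */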
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skbuff)
		kfree(np->rx_skbuff);
	if (np->rx_dma)
		kfree(np->rx_dma);
	if (np->tx_skbuff)
		kfree(np->tx_skbuff);
	if (np->tx_dma)
		kfree(np->tx_dma);
	if (np->tx_dma_len)
		kfree(np->tx_dma_len);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR: writing the currently
 * enabled mask again toggles those bits off, which is why the disable path
 * below writes the mask rather than 0 when MSI-X is enabled. */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}
	return retval;
}
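/* Usage note (added): pass MII_READ as `value' to read, or a register value
 * to write. For example, phy_reset() below reads and then writes back the
 * basic mode control register:
 *
 *	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	miicontrol |= BMCR_RESET;
 *	mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
 *
 * A read returns the register contents or -1 on failure; a write returns
 * 0 on success and -1 on timeout.
 */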
  926. static int phy_reset(struct net_device *dev)
  927. {
  928. struct fe_priv *np = netdev_priv(dev);
  929. u32 miicontrol;
  930. unsigned int tries = 0;
  931. miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  932. miicontrol |= BMCR_RESET;
  933. if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
  934. return -1;
  935. }
  936. /* wait for 500ms */
  937. msleep(500);
  938. /* must wait till reset is deasserted */
  939. while (miicontrol & BMCR_RESET) {
  940. msleep(10);
  941. miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
  942. /* FIXME: 100 tries seem excessive */
  943. if (tries++ > 100)
  944. return -1;
  945. }
  946. return 0;
  947. }
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
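
/*
 * nv_mac_reset: reset the MAC block itself.
 * The tx/rx paths are held in reset while the MAC reset bit is
 * pulsed, then everything is released again.
 */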
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % np->rx_ring_size;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == np->rx_ring_size)
		return 1;
	return 0;
}
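
/*
 * nv_do_rx_refill: oom_kick timer callback.
 * Refills the rx ring outside of interrupt context; the relevant
 * irq(s) are disabled first so this cannot race with the interrupt
 * handler. If allocation fails again, the timer is re-armed.
 */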
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->rx_ring_size;
	np->refill_rx = 0;
	for (i = 0; i < np->rx_ring_size; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].flaglen = 0;
		else
			np->rx_ring.ex[i].flaglen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].flaglen = 0;
		else
			np->tx_ring.ex[i].flaglen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].flaglen = 0;
		else
			np->tx_ring.ex[i].flaglen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].flaglen = 0;
		else
			np->rx_ring.ex[i].flaglen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
					 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
	unsigned int start_nr = np->next_tx % np->tx_ring_size;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % np->tx_ring_size;
		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % np->tx_ring_size;
			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % np->tx_ring_size;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
		else
			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
					dev->name, np->nic_tx, flags);
		if (flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < np->tx_limit_start)
		netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
		       dev->name, (unsigned long)np->ring_addr,
		       np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;

		if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % np->rx_ring_size;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
					dev->name, np->cur_rx, flags);

		if (flags & NV_RX_AVAIL)
			break;	/* still owned by hardware */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (flags & NV_RX_ERROR) {
				if (flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (flags & NV_RX_FRAMINGERR) {
					if (flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (flags & NV_RX2_ERROR) {
				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
				flags &= NV_RX2_CHECKSUMMASK;
				if (flags == NV_RX2_CHECKSUMOK1 ||
				    flags == NV_RX2_CHECKSUMOK2 ||
				    flags == NV_RX2_CHECKSUMOK3) {
					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
				}
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
		} else {
			netif_rx(skb);
		}
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}
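
/*
 * set_bufsize: derive the rx buffer size from the current MTU.
 * NV_RX_HEADERS presumably leaves room for the link-level headers
 * on top of the MTU-sized payload.
 */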
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed; there is probably a simpler approach.
		 * Changing the MTU is a rare event, so it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);
		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
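
/*
 * nv_update_pause: apply the requested pause frame settings.
 * Rx pause is controlled through the packet filter flags, tx pause
 * through NvRegMisc1 and the tx pause frame register; each side is
 * only touched if the corresponding _CAPABLE flag is set.
 */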
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}

/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) {
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				}
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	return retval;
}

static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
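
/*
 * nv_link_irq: handle a link change interrupt.
 * Acknowledges the MII status bits and lets nv_linkchange
 * reprogram the MAC if the link state really changed.
 */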
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
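
/*
 * nv_nic_irq: interrupt handler for the single-vector case
 * (legacy irq, MSI, or MSI-X with all events on one vector).
 * Loops until no masked events remain; after max_interrupt_work
 * iterations it masks the irq on the nic and defers further work
 * to the nic_poll timer so we don't get stuck in interrupt context.
 */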
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock(&np->lock);
		}

		if (events & NVREG_IRQ_LINK) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
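
/*
 * nv_nic_irq_tx: MSI-X handler for the tx vector.
 * Only tx completion events are acknowledged and processed here;
 * rx and link events arrive on their own vectors.
 */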
static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irq(&np->lock);
		nv_tx_done(dev);
		spin_unlock_irq(&np->lock);

		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock_irq(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock_irq(&np->lock);
		}

		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irq(&np->lock);
			nv_link_irq(dev);
			spin_unlock_irq(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irq(&np->lock);
			nv_linkchange(dev);
			spin_unlock_irq(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
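
/*
 * nv_nic_irq_test: handler used by the interrupt self-test.
 * Only acknowledges the timer event and records that the
 * interrupt actually arrived in np->intr_test.
 */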
static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
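
/*
 * Worked example for the mapping above: assigning vector 1 to
 * irqmask 0x00f0 matches interrupt bits 4-7, so msixmap becomes
 * (1 << 16) | (1 << 20) | (1 << 24) | (1 << 28) = 0x11110000,
 * one 4-bit vector id per interrupt bit, or-ed into NvRegMSIXMap0.
 */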
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if ((!intr_test &&
				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
				    (intr_test &&
				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
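
/*
 * nv_free_irq: undo nv_request_irq.
 * Releases every requested vector and disables MSI-X/MSI again.
 */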
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable the irq(s), then reenable interrupts on the nic;
	 * we have to do this before calling nv_nic_irq because that may
	 * decide to do otherwise
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* FIXME: Do we need synchronize_irq(dev->irq) here? */

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		nv_nic_irq(0, dev, NULL);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev, NULL);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
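
/*
 * Periodically fold the adapter's hardware statistics counters into the
 * software totals in np->estats.  The registers are presumably
 * clear-on-read (the values are accumulated with +=), so this must also
 * run often enough that the hardware counters cannot wrap.
 */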
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.tx_deferral += readl(base + NvRegTxDef);
	np->estats.tx_packets += readl(base + NvRegTxFrame);
	np->estats.tx_pause += readl(base + NvRegTxPause);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_bytes += readl(base + NvRegRxCnt);
	np->estats.rx_pause += readl(base + NvRegRxPause);
	np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);

	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: with autonegotiation disabled, forcing speed 1000 is
		 * intentionally forbidden - no one should need that. */
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy */
			if (phy_reset(dev)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else if (netif_running(dev)) {
			/* Wait a bit and then reconfigure the nic. */
			udelay(10);
			nv_linkchange(dev);
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* buf holds exactly register_size bytes, so iterate with '<':
	 * '<=' would write one u32 past the end of the buffer */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
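
/*
 * Resize the rx/tx rings: allocate the new descriptor ring and bookkeeping
 * arrays first, and only if every allocation succeeds stop the engines,
 * drop the old rings and swap in the new ones.  A failed resize therefore
 * leaves the running device untouched.
 */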
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
	rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
	tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
	tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		/* kfree(NULL) is a no-op, so the partial allocations can
		 * simply be freed unconditionally */
		kfree(rx_skbuff);
		kfree(rx_dma);
		kfree(tx_skbuff);
		kfree(tx_dma);
		kfree(tx_dma_len);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skbuff = (struct sk_buff**)rx_skbuff;
	np->rx_dma = (dma_addr_t*)rx_dma;
	np->tx_skbuff = (struct sk_buff**)tx_skbuff;
	np->tx_dma = (dma_addr_t*)tx_dma;
	np->tx_dma_len = (unsigned int*)tx_dma_len;
	np->ring_addr = ring_addr;

	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}

static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
		    (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
			/* already set or unset */
			return 0;
		}

		if (data) {
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		} else {
			printk(KERN_INFO "Cannot disable rx checksum if vlan is enabled\n");
			return -EINVAL;
		}

		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_stats_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_STATISTICS)
		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
	else
		return 0;
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
}

static int nv_self_test_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
		return NV_TEST_COUNT_EXTENDED;
	else
		return NV_TEST_COUNT_BASE;
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	/* BMSR latches link-down events: read it twice so the second read
	 * reflects the current link state */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
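
/*
 * Walk the nv_registers_test[] table: XOR each register with its mask to
 * flip the writable bits, verify the flipped value reads back, then
 * restore the original contents.  Any mismatch fails the test.
 */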
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
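
/*
 * Self test for interrupt delivery: tear down the current irq setup,
 * request a single test vector, arm the timer interrupt and check that
 * the test ISR ran (np->intr_test set) within 100 ms, then restore the
 * original configuration.
 */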
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base + NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
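
/*
 * MAC-level loopback test: force the nic into loopback via NvRegMisc1 and
 * NVREG_PFF_LOOPBACK, transmit one pattern-filled frame and verify that it
 * comes back on the rx ring with the expected length and payload.
 */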
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);
	/* the buffer is transmitted by the device, so map it to-device */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skbuff[0];
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	/* unmap with pci_unmap_single to match the pci_map_single above */
	pci_unmap_single(np->pci_dev, test_dma_addr,
			 tx_skb->end-tx_skb->data,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);

	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}

static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_tso = ethtool_op_get_tso,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_stats_count = nv_get_stats_count,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.self_test_count = nv_self_test_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing to do */
}
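
/*
 * Bring the interface up: wipe any stale hardware state, allocate and
 * publish the descriptor rings, program speed/offload/interrupt setup,
 * request the irq(s) and finally kick off link detection, the rx/tx
 * engines and the housekeeping timers.
 */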
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(0, base + NvRegUnknownTransmitterReg);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegUnknownSetupReg4);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
		base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & DEV_HAS_STATISTICS)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);

	if (np->wolenabled)
		nv_start_rx(dev);

	/* FIXME: power down nic */

	return 0;
}
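
/*
 * PCI probe: allocate the net_device, map the register window, work out
 * the descriptor format and feature set from the device table entry,
 * allocate the rings, read the (byte-swapped) MAC address from the
 * hardware and register the interface.
 */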
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
					pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
					pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
					pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;
	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
		goto out_freering;
	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);

	dev->open = nv_open;
	dev->stop = nv_close;
	dev->hard_start_xmit = nv_start_xmit;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    revision_id >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	/* reset it */
	phy_init(dev);

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));

	return 0;

out_error:
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
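
/* PCI ID table: one entry per supported NVIDIA MAC.  driver_data carries
 * the per-chip DEV_* feature flags that nv_probe consults; the
 * MODULE_DEVICE_TABLE() declaration below exports the table so the module
 * can be autoloaded for these IDs. */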
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{0,},
};
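
/* Glue between the PCI core and this driver: match against pci_tbl,
 * bind with nv_probe, tear down with nv_remove. */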
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};
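
/* Note: pci_module_init() was the older name for pci_register_driver()
 * in kernels of this vintage; the two calls are equivalent here. */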
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
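
/* Module parameters.  Permission 0 means they are settable at module load
 * time only and are not exposed through sysfs. */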
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);