/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/system.h>

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
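/* The _V12 and _V123 values are simply the bitwise OR of the single-version
 * flags above (0x200|0x400 = 0x600, 0x200|0x400|0x800 = 0xe00), so a device
 * can advertise several statistics versions in one driver_data constant. */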
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,
	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
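/* ring_desc is the original 32-bit layout used by DESC_VER_1 and DESC_VER_2;
 * ring_desc_ex is the extended 64-bit-capable layout used by DESC_VER_3
 * (see nv_optimized() below, which selects between the two paths). */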
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
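/* XOR against all-ones yields the complement, so LEN_MASK_V1 = 0x0000ffff
 * and LEN_MASK_V2 = 0x00003fff: the low bits of flaglen hold the buffer
 * length, the high bits hold the flags defined below (see also
 * nv_descr_getlength()). */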
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
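/* With the struct layout above these evaluate to 33, 30 and 24 counters for
 * v3, v2 and v1 hardware respectively; deriving the counts from sizeof()
 * keeps them in sync if fields are added to struct nv_ethtool_stats. */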
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];    /* -rx    */
	char name_tx[IFNAMSIZ + 3];    /* -tx    */
	char name_other[IFNAMSIZ + 6]; /* -other */
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
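/* Worked example: a 1 ms interval is 1000 us, and (1000 * 100) / 1024
 * rounds down to 97 -- the NVREG_POLL_DEFAULT=97 value quoted in the
 * register comments above. A minimal helper sketch of that mapping
 * (nv_usec_to_poll_interval is hypothetical, not part of this driver):
 *
 *	static inline u32 nv_usec_to_poll_interval(u32 usec)
 *	{
 *		return min_t(u32, (usec * 100) / 1024, 65535);
 *	}
 */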
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
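/* "Optimized" here means the DESC_VER_3 path with extended descriptors
 * (ring_desc_ex); DESC_VER_1/2 hardware uses the original ring_desc. */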
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
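/* Poll a register until (value & mask) == target, waiting delay usec per
 * iteration for at most delaymax usec in total; returns 0 on success and 1
 * if the hardware never reached the expected state. */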
  854. static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
  855. int delay, int delaymax)
  856. {
  857. u8 __iomem *base = get_hwbase(dev);
  858. pci_push(base);
  859. do {
  860. udelay(delay);
  861. delaymax -= delay;
  862. if (delaymax < 0)
  863. return 1;
  864. } while ((readl(base + offset) & mask) != target);
  865. return 0;
  866. }
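
/*
 * reg_delay() busy-waits in steps of 'delay' microseconds until
 * (readl(base + offset) & mask) == target, giving up after roughly
 * 'delaymax' microseconds. It returns 0 on success and 1 on timeout,
 * so callers treat a nonzero result as stuck hardware.
 */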

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
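
/*
 * The double shift (>>31>>1) in dma_high() instead of a single >>32 is
 * deliberate: when dma_addr_t is a 32-bit type, shifting by the full
 * width of the type is undefined behavior in C, while two smaller
 * shifts are well defined and simply yield 0.
 */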

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
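
/*
 * Note: using_multi_irqs() reports 1 only when MSI-X is active with
 * more than one vector, i.e. rx, tx and "other" events each have a
 * dedicated interrupt line; in every other configuration the device
 * shares a single irq.
 */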

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
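
/*
 * Note the asymmetry above: in the MSI-X case the same mask is written
 * for both enable and disable, which works because of the XOR behavior
 * noted before nv_enable_hw_interrupts() - writing the currently set
 * bits toggles them off. In the MSI/legacy case the mask registers are
 * simply cleared.
 */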

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)

/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		netdev_dbg(dev, "mii_rw of reg %d at PHY %d timed out\n",
			   miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		netdev_dbg(dev, "mii_rw wrote 0x%x to reg %d at PHY %d\n",
			   value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		netdev_dbg(dev, "mii_rw of reg %d at PHY %d failed\n",
			   miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		netdev_dbg(dev, "mii_rw read from reg %d at PHY %d: 0x%x\n",
			   miireg, addr, retval);
	}

	return retval;
}
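
/*
 * mii_rw() return convention: -1 signals a timeout or PHY error, 0 a
 * completed write, and any other value is the register contents of a
 * completed read. Callers that only need success/failure therefore
 * test the result for nonzero after a write, e.g.:
 *
 *	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
 *		return -1;
 */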

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	netdev_dbg(dev, "%s\n", __func__);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	netdev_dbg(dev, "%s: duplex %d, speed 0x%08x\n",
		   __func__, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	netdev_dbg(dev, "%s\n", __func__);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	netdev_dbg(dev, "%s\n", __func__);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	netdev_dbg(dev, "%s\n", __func__);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	netdev_dbg(dev, "%s\n", __func__);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	netdev_dbg(dev, "%s\n", __func__);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is left without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else
			return 1;
	}
	return 0;
}
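
/*
 * Note on the loop bound above: 'less_rx' is the descriptor one slot
 * behind the current get pointer (wrapping at the ring start). Filling
 * only until put_rx reaches it keeps one slot permanently empty, so a
 * completely full ring can never be mistaken for a completely empty
 * one.
 */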

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else
			return 1;
	}
	return 0;
}

/* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}

static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
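
/*
 * Worked example of the modular arithmetic above, with
 * tx_ring_size = 256: if put_tx_ctx is 10 entries ahead of get_tx_ctx,
 * (256 + 10) % 256 = 10 slots are in use and 246 are empty; after a
 * wrap where the pointer difference is -20, (256 - 20) % 256 = 236 are
 * in use and 20 are empty.
 */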

static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already acquired np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}

/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };

static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We gather three random 12-bit miniseeds and swizzle their bits
	   around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds cannot be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
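
/*
 * In the function above, the seed row is chosen at random from the
 * eight known-good rows; LFSR select index 0 (the free running LFSR)
 * receives the combined miniseed, while indices 1..15 each get a
 * main/gear seed pair from the tables.
 */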

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	netdev_dbg(dev, "%s: entries %d queued for transmission. tx_flags_extra: %x\n",
		   __func__, entries, tx_flags_extra);
#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 64, true);
#endif

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (vlan_tx_tag_present(skb))
		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
					       vlan_tx_tag_get(skb));
	else
		start_tx->txvlan = 0;

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also to the next skb_map element.
		 */
		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	netdev_dbg(dev, "%s: entries %d queued for transmission. tx_flags_extra: %x\n",
		   __func__, entries, tx_flags_extra);
#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 64, true);
#endif

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
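
/*
 * nv_tx_flip_ownership() is the completion half of the tx_limit scheme
 * set up in nv_start_xmit_optimized(): each completed packet hands the
 * VALID bit to the oldest deferred packet's first descriptor and kicks
 * the transmitter, so at most NV_TX_LIMIT_COUNT packets are owned by
 * the hardware at any one time.
 */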

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc *orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {
		netdev_dbg(dev, "%s: flags 0x%x\n", __func__, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
	       (tx_work < limit)) {
		netdev_dbg(dev, "%s: flags 0x%x\n", __func__, flags);

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);

	netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
	netdev_info(dev, "Dumping tx registers\n");
	for (i = 0; i <= np->register_size; i += 32) {
		netdev_info(dev,
			    "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			    i,
			    readl(base + i + 0), readl(base + i + 4),
			    readl(base + i + 8), readl(base + i + 12),
			    readl(base + i + 16), readl(base + i + 20),
			    readl(base + i + 24), readl(base + i + 28));
	}
	netdev_info(dev, "Dumping tx ring\n");
	for (i = 0; i < np->tx_ring_size; i += 4) {
		if (!nv_optimized(np)) {
			netdev_info(dev,
				    "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				    i,
				    le32_to_cpu(np->tx_ring.orig[i].buf),
				    le32_to_cpu(np->tx_ring.orig[i].flaglen),
				    le32_to_cpu(np->tx_ring.orig[i+1].buf),
				    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				    le32_to_cpu(np->tx_ring.orig[i+2].buf),
				    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				    le32_to_cpu(np->tx_ring.orig[i+3].buf),
				    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
		} else {
			netdev_info(dev,
				    "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				    i,
				    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				    le32_to_cpu(np->tx_ring.ex[i].buflow),
				    le32_to_cpu(np->tx_ring.ex[i].flaglen),
				    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);
  2282. /* save current HW postion */
  2283. if (np->tx_change_owner)
  2284. put_tx.ex = np->tx_change_owner->first_tx_desc;
  2285. else
  2286. put_tx = np->put_tx;
  2287. /* 3) clear all tx state */
  2288. nv_drain_tx(dev);
  2289. nv_init_tx(dev);
  2290. /* 4) restore state to current HW position */
  2291. np->get_tx = np->put_tx = put_tx;
  2292. np->tx_limit = saved_tx_limit;
  2293. /* 5) restart tx engine */
  2294. nv_start_tx(dev);
  2295. netif_wake_queue(dev);
  2296. spin_unlock_irq(&np->lock);
  2297. }
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	netdev_dbg(dev, "%s: datalen %d, protolen %d, hdrlen %d\n",
		   __func__, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			netdev_dbg(dev, "%s: accepting %d bytes\n",
				   __func__, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			netdev_dbg(dev, "%s: discarding long packet\n",
				   __func__);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			netdev_dbg(dev, "%s: discarding short packet\n",
				   __func__);
			return -1;
		}
		netdev_dbg(dev, "%s: accepting %d bytes\n", __func__, datalen);
		return datalen;
	}
}
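
/*
 * nv_rx_process: rx bottom half for descriptor versions 1 and 2.
 * Walks the rx ring until it hits a descriptor still owned by the nic
 * or 'limit' packets have been handled; good packets are handed to GRO,
 * error statistics are updated for the rest.
 */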
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		netdev_dbg(dev, "%s: flags 0x%x\n", __func__, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		netdev_dbg(dev, "Dumping packet (flags 0x%x)\n", flags);
#ifdef DEBUG
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, 64, true);
#endif
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		netdev_dbg(dev, "%s: %d bytes, proto %d accepted\n",
			   __func__, len, skb->protocol);
		napi_gro_receive(&np->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
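
/*
 * nv_rx_process_optimized: rx processing for descriptor version 3.
 * Same flow as nv_rx_process, but additionally honours the hardware
 * checksum bits and forwards the vlan tag via vlan_gro_receive when a
 * vlan group is registered.
 */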
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

		netdev_dbg(dev, "%s: flags 0x%x\n", __func__, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		netdev_dbg(dev, "Dumping packet (flags 0x%x)\n", flags);
#ifdef DEBUG
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, 64, true);
#endif
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1)
						len--;
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			netdev_dbg(dev, "%s: %d bytes, proto %d accepted\n",
				   __func__, len, skb->protocol);

			if (likely(!np->vlangrp)) {
				napi_gro_receive(&np->napi, skb);
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
					vlan_gro_receive(&np->napi, np->vlangrp,
							 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
				} else {
					napi_gro_receive(&np->napi, skb);
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
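
/* derive the rx buffer size from the current MTU (plus NV_RX_HEADERS slack) */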
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct netdev_hw_addr *ha;

				netdev_for_each_mc_addr(ha, dev) {
					/* renamed from 'addr' to avoid shadowing addr[] above */
					unsigned char *hw_addr = ha->addr;
					u32 a, b;

					a = le32_to_cpu(*(__le32 *) hw_addr);
					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	netdev_dbg(dev, "reconfiguration for multicast lists\n");
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
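
/*
 * nv_update_pause: program the rx/tx pause frame configuration according
 * to pause_flags. Only touches directions the hardware is capable of and
 * mirrors the result in np->pause_flags.
 */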
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		netdev_dbg(dev,
			   "no link detected by phy - falling back to 10HD\n");
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		netdev_dbg(dev, "%s: autoneg off, PHY set to 0x%04x\n",
			   __func__, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		netdev_dbg(dev,
			   "autoneg not completed - falling back to 10HD\n");
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	netdev_dbg(dev, "%s: PHY advertises 0x%04x, lpa 0x%04x\n",
		   __func__, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			netdev_dbg(dev, "%s: GBit ethernet detected\n",
				   __func__);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		netdev_dbg(dev, "bad ability %04x - falling back to 10HD\n",
			   adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	netdev_dbg(dev, "changing link setting from %d/%d to %d/%d\n",
		   np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up\n");
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	netdev_dbg(dev, "link change irq, status 0x%x\n", miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	netdev_dbg(dev, "link change notification done\n");
}

static void nv_msi_workaround(struct fe_priv *np)
{
	/* Need to toggle the MSI irq mask within the ethernet device;
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
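
/*
 * nv_change_interrupt_mode: dynamic interrupt moderation. Under heavy
 * load switch to the poll-based CPU irq mask; after a quiet period switch
 * back to per-packet throughput interrupts. Returns 1 if np->irqmask was
 * changed; the new mask takes effect when interrupts are next re-enabled.
 */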
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
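
/*
 * nv_nic_irq: handler for the single-vector configurations (INTx, MSI,
 * or MSI-X with one shared vector): ack the events, mask further
 * interrupts and hand the work to NAPI.
 */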
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	netdev_dbg(dev, "%s\n", __func__);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	netdev_dbg(dev, "irq: %08x\n", np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}

	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_HANDLED;
}
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	netdev_dbg(dev, "%s\n", __func__);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	netdev_dbg(dev, "irq: %08x\n", np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}

	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_HANDLED;
}
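
/*
 * nv_nic_irq_tx: MSI-X handler for the dedicated tx vector. Reaps
 * completed tx descriptors; once the loop exceeds max_interrupt_work
 * the tx irq is masked and processing is deferred to the nic_poll timer.
 */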
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	netdev_dbg(dev, "%s\n", __func__);

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "tx irq: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}
	}
	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_RETVAL(i);
}
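
/*
 * nv_napi_poll: NAPI poll callback. Completes tx work, processes up to
 * 'budget' rx packets and refills the rx ring (arming the OOM timer if
 * allocation fails), handles link and recoverable-error events, and
 * re-enables interrupts once under budget.
 */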
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int rx_count, tx_work = 0, rx_work = 0;

	do {
		if (!nv_optimized(np)) {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process(dev, budget - rx_work);
			retcode = nv_alloc_rx(dev);
		} else {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process_optimized(dev,
							   budget - rx_work);
			retcode = nv_alloc_rx_optimized(dev);
		}
	} while (retcode == 0 &&
		 rx_count > 0 && (rx_work += rx_count) < budget);

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
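
/*
 * nv_nic_irq_rx: MSI-X handler for the dedicated rx vector. Processes rx
 * packets directly (NAPI is not used in this multi-vector mode) and falls
 * back to timer polling after max_interrupt_work iterations.
 */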
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	netdev_dbg(dev, "%s\n", __func__);

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "rx irq: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_RETVAL(i);
}
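
/*
 * nv_nic_irq_other: MSI-X handler for link, recoverable-error and
 * leftover tx events that did not fit in the tx handler's loop limit.
 */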
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	netdev_dbg(dev, "%s\n", __func__);

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "irq: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}
	}
	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_RETVAL(i);
}
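
/*
 * nv_nic_irq_test: handler used by the interrupt self-test; records in
 * np->intr_test that the timer interrupt fired.
 */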
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	netdev_dbg(dev, "%s\n", __func__);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	netdev_dbg(dev, "irq: %08x\n", events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	netdev_dbg(dev, "%s: completed\n", __func__);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
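
/*
 * nv_request_irq: set up interrupt delivery. Tries MSI-X first (with
 * separate rx/tx/other vectors in throughput mode), then MSI, then the
 * legacy INTx line. Returns 0 on success, 1 on failure.
 */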
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			np->msi_x_entry[i].entry = i;
		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
		if (ret == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for rx %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for tx %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for link %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		ret = pci_enable_msi(np->pci_dev);
		if (ret == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				netdev_info(dev, "request_irq failed %d\n",
					    ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
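
/* undo whatever interrupt setup nv_request_irq established */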
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			free_irq(np->msi_x_entry[i].vector, dev);
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic; we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		netdev_info(dev, "MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
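
/* timer callback: snapshot the hardware statistics counters and re-arm */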
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			netdev_info(dev, "link down\n");
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	return np->register_size;
}
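
/* Dump the NIC's register window for ethtool ("ethtool -d ethX"). The
 * buffer supplied by the ethtool core is nv_get_regs_len() bytes long.
 */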
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the buffer holds exactly register_size bytes, so the loop bound
	 * must be exclusive; "<=" would write one u32 past the end */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
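
/* Restart autonegotiation on behalf of "ethtool -r ethX". Only valid while
 * the interface is configured for autoneg; otherwise return -EINVAL.
 */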
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			netdev_info(dev, "link down\n");
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
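
/* Report the current and maximum ring sizes ("ethtool -g ethX"); the
 * maximum depends on the descriptor format of the chip revision.
 */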
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
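
/* Resize the descriptor rings ("ethtool -G ethX rx N tx M"): allocate the
 * new rings first, then stop the engines, swap the rings in and restart.
 * Allocating up front means a failed resize leaves the device untouched.
 */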
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
						 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						 &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
						 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						 &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}

		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;

exit:
	return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
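
/* Configure flow control ("ethtool -A ethX ..."): either renegotiate the
 * pause advertisement with the link partner or force the pause state
 * directly, depending on whether pause autonegotiation is requested.
 */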
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		netdev_info(dev, "hardware does not support tx pause frames\n");
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			netdev_info(dev, "link down\n");
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	return np->rx_csum != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}

		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	/* BMSR link status is latched low, so read it twice: the first read
	 * clears any stale latched value, the second reflects the current
	 * link state */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
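
/* Walk the nv_registers_test table, toggling the maskable bits of each
 * register and verifying they read back; returns 1 on pass, 0 on failure.
 */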
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
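
/* Verify that the NIC can raise an interrupt: temporarily install a single
 * test vector, arm the timer interrupt and wait for the handler to set
 * np->intr_test. Returns 1 on pass, 2 if no interrupt arrived, and 0 on a
 * setup failure that leaves the device unusable for further tests.
 */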
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
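
/* Loopback self-test: put the MAC into loopback, transmit one packet with
 * a known byte pattern and verify it comes back intact on the rx ring.
 * Returns 1 on pass, 0 on failure.
 */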
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
		ret = 0;
		goto out;
	}
	/* the test packet is transmitted, so map it towards the device,
	 * matching the PCI_DMA_TODEVICE unmap below */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR)
			ret = 0;
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			netdev_dbg(dev, "loopback len mismatch %d vs %d\n",
				   len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					netdev_dbg(dev, "loopback pattern check failed on byte %d\n",
						   i);
					break;
				}
			}
		}
	} else {
		netdev_dbg(dev, "loopback - did not receive test packet\n");
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
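
/* ethtool self-test entry point ("ethtool -t ethX"). The link test always
 * runs; the register, interrupt and loopback tests only run offline since
 * they tear the device down and rebuild it afterwards.
 */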
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;

	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		} else
			udelay(50);
	}

	return 0;
}
static void nv_mgmt_release_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl;

	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
		if (np->mgmt_sema) {
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
		}
	}
}
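
/* Ask the management unit for its version: kick off the request and poll
 * for the data-ready bit to toggle, with a five second timeout.
 */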
static int nv_mgmt_get_version(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 data_ready = readl(base + NvRegTransmitterControl);
	u32 data_ready2 = 0;
	unsigned long start;
	int ready = 0;

	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}

	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
		return 0;

	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;

	return 1;
}
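
/* ndo_open: power up the phy, wipe any stale configuration, publish the
 * descriptor rings, program the MAC, request the irq(s) and kick off a
 * first link-speed update before starting the engines.
 */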
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	netdev_dbg(dev, "%s\n", __func__);

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	if (reg_delay(dev, NvRegUnknownSetupReg5,
		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
		netdev_info(dev,
			    "%s: SetupReg5, Bit 31 remained off\n", __func__);

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;

		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		netdev_dbg(dev, "startup: got 0x%08x\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "no link during initialization\n");
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
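
/* ndo_close: flag shutdown, quiesce NAPI, the timers and the queues, then
 * either keep the receiver alive for wake-on-LAN or power the phy down.
 */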
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	netdev_dbg(dev, "Irqmask is zero again\n");

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}
static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
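
/* PCI probe: map the register window, work out the descriptor format and
 * feature set from the PCI id's driver_data, read and sanity-check the MAC
 * address, find the phy, and register the netdev.
 */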
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
			FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		netdev_dbg(dev, "%s: resource %d start %p len %lld flags 0x%08lx\n",
			   pci_name(pci_dev), i,
			   (void *)(unsigned long)pci_resource_start(pci_dev, i),
			   (long long)pci_resource_len(pci_dev, i),
			   pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_info(&pci_dev->dev, "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_info(&pci_dev->dev,
					 "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_info(&pci_dev->dev,
					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_GRO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
	SET_ETHTOOL_OPS(dev, &ops);
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_err(&pci_dev->dev,
			"Invalid MAC address detected: %pM\n",
			dev->dev_addr);
		dev_err(&pci_dev->dev,
			"Please complain to your hardware vendor. Switched to a random MAC address.\n");
		random_ether_addr(dev->dev_addr);
	}

	netdev_dbg(dev, "%s: MAC Address %pM\n",
		   pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1)
		np->tx_flags = NV_TX_VALID;
	else
		np->tx_flags = NV_TX2_VALID;

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi)
		np->msi_flags |= NV_MSI_CAPABLE;

	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/*
		 * msix has had reported issues when modifying irqmask
		 * as in the case of napi, therefore, disable for now
		 */
#if 0
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		netdev_dbg(dev, "%s: link timer on\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		netdev_dbg(dev, "%s: link timer off\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0)
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			netdev_dbg(dev, "%s: mgmt unit is running. mac in use %x\n",
				   pci_name(pci_dev), np->mac_in_use);
			/* management unit setup the phy already? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				netdev_dbg(dev, "%s: Phy already initialized by mgmt unit\n",
					   pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		netdev_dbg(dev, "%s: %s: Found PHY %04x:%04x at address %d\n",
			   pci_name(pci_dev), __func__, id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zeros on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_info(&pci_dev->dev, "Could not find a valid PHY\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

		if (mii_status & PHY_GIGABIT)
			np->gigabit = PHY_GIGABIT;
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
		 np->need_linktimer ? "lnktim " : "",
		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		 np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
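/* Legacy PCI power management hooks: on suspend the device is closed and
 * the register window saved, so resume can restore it before reopening.
 */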
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* Gross. */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base+i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
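
/*
 * nv_shutdown: unlike nv_remove this runs on every reboot/poweroff.
 * The MAC address is only restored when we are *not* powering off
 * (the kexec case); on a real poweroff the device is armed for WOL
 * and dropped into D3 instead.
 */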
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
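
/*
 * PCI device table.  The driver_data word of each entry is a DEV_*
 * feature/workaround bitmask that nv_probe() consults to decide which
 * capabilities (checksumming, MSI, jumbo frames, statistics version,
 * ...) to enable for that chipset.
 */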
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{ /* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};
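
/*
 * __devexit_p() lets the remove callback be compiled out when the
 * kernel is built without hotplug support.
 */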
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
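
/*
 * Module parameters.  The backing variables (and their defaults) are
 * defined earlier in the file.
 */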
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);