ksz884x.c 181 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335
/**
 * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
 *
 * Copyright (c) 2009-2010 Micrel, Inc.
 * Tristram Ha <Tristram.Ha@micrel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/sched.h>
  32. /* DMA Registers */
  33. #define KS_DMA_TX_CTRL 0x0000
  34. #define DMA_TX_ENABLE 0x00000001
  35. #define DMA_TX_CRC_ENABLE 0x00000002
  36. #define DMA_TX_PAD_ENABLE 0x00000004
  37. #define DMA_TX_LOOPBACK 0x00000100
  38. #define DMA_TX_FLOW_ENABLE 0x00000200
  39. #define DMA_TX_CSUM_IP 0x00010000
  40. #define DMA_TX_CSUM_TCP 0x00020000
  41. #define DMA_TX_CSUM_UDP 0x00040000
  42. #define DMA_TX_BURST_SIZE 0x3F000000
  43. #define KS_DMA_RX_CTRL 0x0004
  44. #define DMA_RX_ENABLE 0x00000001
  45. #define KS884X_DMA_RX_MULTICAST 0x00000002
  46. #define DMA_RX_PROMISCUOUS 0x00000004
  47. #define DMA_RX_ERROR 0x00000008
  48. #define DMA_RX_UNICAST 0x00000010
  49. #define DMA_RX_ALL_MULTICAST 0x00000020
  50. #define DMA_RX_BROADCAST 0x00000040
  51. #define DMA_RX_FLOW_ENABLE 0x00000200
  52. #define DMA_RX_CSUM_IP 0x00010000
  53. #define DMA_RX_CSUM_TCP 0x00020000
  54. #define DMA_RX_CSUM_UDP 0x00040000
  55. #define DMA_RX_BURST_SIZE 0x3F000000
  56. #define DMA_BURST_SHIFT 24
  57. #define DMA_BURST_DEFAULT 8
  58. #define KS_DMA_TX_START 0x0008
  59. #define KS_DMA_RX_START 0x000C
  60. #define DMA_START 0x00000001
  61. #define KS_DMA_TX_ADDR 0x0010
  62. #define KS_DMA_RX_ADDR 0x0014
  63. #define DMA_ADDR_LIST_MASK 0xFFFFFFFC
  64. #define DMA_ADDR_LIST_SHIFT 2
  65. /* MTR0 */
  66. #define KS884X_MULTICAST_0_OFFSET 0x0020
  67. #define KS884X_MULTICAST_1_OFFSET 0x0021
  68. #define KS884X_MULTICAST_2_OFFSET 0x0022
  69. #define KS884x_MULTICAST_3_OFFSET 0x0023
  70. /* MTR1 */
  71. #define KS884X_MULTICAST_4_OFFSET 0x0024
  72. #define KS884X_MULTICAST_5_OFFSET 0x0025
  73. #define KS884X_MULTICAST_6_OFFSET 0x0026
  74. #define KS884X_MULTICAST_7_OFFSET 0x0027
  75. /* Interrupt Registers */
  76. /* INTEN */
  77. #define KS884X_INTERRUPTS_ENABLE 0x0028
  78. /* INTST */
  79. #define KS884X_INTERRUPTS_STATUS 0x002C
  80. #define KS884X_INT_RX_STOPPED 0x02000000
  81. #define KS884X_INT_TX_STOPPED 0x04000000
  82. #define KS884X_INT_RX_OVERRUN 0x08000000
  83. #define KS884X_INT_TX_EMPTY 0x10000000
  84. #define KS884X_INT_RX 0x20000000
  85. #define KS884X_INT_TX 0x40000000
  86. #define KS884X_INT_PHY 0x80000000
  87. #define KS884X_INT_RX_MASK \
  88. (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
  89. #define KS884X_INT_TX_MASK \
  90. (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
  91. #define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
/* MAC Additional Station Address */
/* 16 low/high register pairs; the high word carries the enable bit. */
/* MAAL0 */
#define KS_ADD_ADDR_0_LO 0x0080
/* MAAH0 */
#define KS_ADD_ADDR_0_HI 0x0084
/* MAAL1 */
#define KS_ADD_ADDR_1_LO 0x0088
/* MAAH1 */
#define KS_ADD_ADDR_1_HI 0x008C
/* MAAL2 */
#define KS_ADD_ADDR_2_LO 0x0090
/* MAAH2 */
#define KS_ADD_ADDR_2_HI 0x0094
/* MAAL3 */
#define KS_ADD_ADDR_3_LO 0x0098
/* MAAH3 */
#define KS_ADD_ADDR_3_HI 0x009C
/* MAAL4 */
#define KS_ADD_ADDR_4_LO 0x00A0
/* MAAH4 */
#define KS_ADD_ADDR_4_HI 0x00A4
/* MAAL5 */
#define KS_ADD_ADDR_5_LO 0x00A8
/* MAAH5 */
#define KS_ADD_ADDR_5_HI 0x00AC
/* MAAL6 */
#define KS_ADD_ADDR_6_LO 0x00B0
/* MAAH6 */
#define KS_ADD_ADDR_6_HI 0x00B4
/* MAAL7 */
#define KS_ADD_ADDR_7_LO 0x00B8
/* MAAH7 */
#define KS_ADD_ADDR_7_HI 0x00BC
/* MAAL8 */
#define KS_ADD_ADDR_8_LO 0x00C0
/* MAAH8 */
#define KS_ADD_ADDR_8_HI 0x00C4
/* MAAL9 */
#define KS_ADD_ADDR_9_LO 0x00C8
/* MAAH9 */
#define KS_ADD_ADDR_9_HI 0x00CC
/* MAAL10 */
#define KS_ADD_ADDR_A_LO 0x00D0
/* MAAH10 */
#define KS_ADD_ADDR_A_HI 0x00D4
/* MAAL11 */
#define KS_ADD_ADDR_B_LO 0x00D8
/* MAAH11 */
#define KS_ADD_ADDR_B_HI 0x00DC
/* MAAL12 */
#define KS_ADD_ADDR_C_LO 0x00E0
/* MAAH12 */
#define KS_ADD_ADDR_C_HI 0x00E4
/* MAAL13 */
#define KS_ADD_ADDR_D_LO 0x00E8
/* MAAH13 */
#define KS_ADD_ADDR_D_HI 0x00EC
/* MAAL14 */
#define KS_ADD_ADDR_E_LO 0x00F0
/* MAAH14 */
#define KS_ADD_ADDR_E_HI 0x00F4
/* MAAL15 */
#define KS_ADD_ADDR_F_LO 0x00F8
/* MAAH15 */
#define KS_ADD_ADDR_F_HI 0x00FC
#define ADD_ADDR_HI_MASK 0x0000FFFF
#define ADD_ADDR_ENABLE 0x80000000
/* Byte stride between consecutive LO/HI register pairs. */
#define ADD_ADDR_INCR 8
/* Miscellaneous Registers */
/* MARL */
#define KS884X_ADDR_0_OFFSET 0x0200
#define KS884X_ADDR_1_OFFSET 0x0201
/* MARM */
#define KS884X_ADDR_2_OFFSET 0x0202
#define KS884X_ADDR_3_OFFSET 0x0203
/* MARH */
#define KS884X_ADDR_4_OFFSET 0x0204
#define KS884X_ADDR_5_OFFSET 0x0205
/* OBCR */
#define KS884X_BUS_CTRL_OFFSET 0x0210
#define BUS_SPEED_125_MHZ 0x0000
#define BUS_SPEED_62_5_MHZ 0x0001
#define BUS_SPEED_41_66_MHZ 0x0002
#define BUS_SPEED_25_MHZ 0x0003
/* EEPCR */
#define KS884X_EEPROM_CTRL_OFFSET 0x0212
#define EEPROM_CHIP_SELECT 0x0001
#define EEPROM_SERIAL_CLOCK 0x0002
#define EEPROM_DATA_OUT 0x0004
#define EEPROM_DATA_IN 0x0008
#define EEPROM_ACCESS_ENABLE 0x0010
/* MBIR */
#define KS884X_MEM_INFO_OFFSET 0x0214
#define RX_MEM_TEST_FAILED 0x0008
#define RX_MEM_TEST_FINISHED 0x0010
#define TX_MEM_TEST_FAILED 0x0800
#define TX_MEM_TEST_FINISHED 0x1000
/* GCR */
#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
#define GLOBAL_SOFTWARE_RESET 0x0001
#define KS8841_POWER_MANAGE_OFFSET 0x0218
/* WFCR */
#define KS8841_WOL_CTRL_OFFSET 0x021A
#define KS8841_WOL_MAGIC_ENABLE 0x0080
#define KS8841_WOL_FRAME3_ENABLE 0x0008
#define KS8841_WOL_FRAME2_ENABLE 0x0004
#define KS8841_WOL_FRAME1_ENABLE 0x0002
#define KS8841_WOL_FRAME0_ENABLE 0x0001
/* WF0 */
#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
/* IACR */
#define KS884X_IACR_P 0x04A0
#define KS884X_IACR_OFFSET KS884X_IACR_P
/* IADR1 */
#define KS884X_IADR1_P 0x04A2
#define KS884X_IADR2_P 0x04A4
#define KS884X_IADR3_P 0x04A6
#define KS884X_IADR4_P 0x04A8
#define KS884X_IADR5_P 0x04AA
/*
 * Indirect access data bytes, mapped onto the IADR registers.
 * Note the byte-to-register mapping is not sequential.
 */
#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
/* P1MBCR */
#define KS884X_P1MBCR_P 0x04D0
#define KS884X_P1MBSR_P 0x04D2
#define KS884X_PHY1ILR_P 0x04D4
#define KS884X_PHY1IHR_P 0x04D6
#define KS884X_P1ANAR_P 0x04D8
#define KS884X_P1ANLPR_P 0x04DA
/* P2MBCR */
#define KS884X_P2MBCR_P 0x04E0
#define KS884X_P2MBSR_P 0x04E2
#define KS884X_PHY2ILR_P 0x04E4
#define KS884X_PHY2IHR_P 0x04E6
#define KS884X_P2ANAR_P 0x04E8
#define KS884X_P2ANLPR_P 0x04EA
/* Per-PHY register banks; PHY n lives at base + n * PHY_CTRL_INTERVAL. */
#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
#define KS884X_PHY_CTRL_OFFSET 0x00
/* Mode Control Register */
#define PHY_REG_CTRL 0
#define PHY_RESET 0x8000
#define PHY_LOOPBACK 0x4000
#define PHY_SPEED_100MBIT 0x2000
#define PHY_AUTO_NEG_ENABLE 0x1000
#define PHY_POWER_DOWN 0x0800
#define PHY_MII_DISABLE 0x0400
#define PHY_AUTO_NEG_RESTART 0x0200
#define PHY_FULL_DUPLEX 0x0100
#define PHY_COLLISION_TEST 0x0080
#define PHY_HP_MDIX 0x0020
#define PHY_FORCE_MDIX 0x0010
#define PHY_AUTO_MDIX_DISABLE 0x0008
#define PHY_REMOTE_FAULT_DISABLE 0x0004
#define PHY_TRANSMIT_DISABLE 0x0002
#define PHY_LED_DISABLE 0x0001
#define KS884X_PHY_STATUS_OFFSET 0x02
/* Mode Status Register */
#define PHY_REG_STATUS 1
#define PHY_100BT4_CAPABLE 0x8000
#define PHY_100BTX_FD_CAPABLE 0x4000
#define PHY_100BTX_CAPABLE 0x2000
#define PHY_10BT_FD_CAPABLE 0x1000
#define PHY_10BT_CAPABLE 0x0800
#define PHY_MII_SUPPRESS_CAPABLE 0x0040
#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
#define PHY_REMOTE_FAULT 0x0010
#define PHY_AUTO_NEG_CAPABLE 0x0008
#define PHY_LINK_STATUS 0x0004
#define PHY_JABBER_DETECT 0x0002
#define PHY_EXTENDED_CAPABILITY 0x0001
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
/* PHY Identifier Registers */
#define PHY_REG_ID_1 2
#define PHY_REG_ID_2 3
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
/* Auto-Negotiation Advertisement Register */
#define PHY_REG_AUTO_NEGOTIATION 4
#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
/* Not supported. */
#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
#define PHY_AUTO_NEG_100BT4 0x0200
#define PHY_AUTO_NEG_100BTX_FD 0x0100
#define PHY_AUTO_NEG_100BTX 0x0080
#define PHY_AUTO_NEG_10BT_FD 0x0040
#define PHY_AUTO_NEG_10BT 0x0020
#define PHY_AUTO_NEG_SELECTOR 0x001F
#define PHY_AUTO_NEG_802_3 0x0001
#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
/* Auto-Negotiation Link Partner Ability Register */
#define PHY_REG_REMOTE_CAPABILITY 5
#define PHY_REMOTE_NEXT_PAGE 0x8000
#define PHY_REMOTE_ACKNOWLEDGE 0x4000
#define PHY_REMOTE_REMOTE_FAULT 0x2000
#define PHY_REMOTE_SYM_PAUSE 0x0400
#define PHY_REMOTE_100BTX_FD 0x0100
#define PHY_REMOTE_100BTX 0x0080
#define PHY_REMOTE_10BT_FD 0x0040
#define PHY_REMOTE_10BT 0x0020
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
/* P2VCT */
#define KS884X_P2VCT_P 0x04F4
#define KS884X_P2PHYCTRL_P 0x04F6
/* Vendor-specific (cable diagnostic) registers, one bank per PHY. */
#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
#define KS884X_PHY_LINK_MD_OFFSET 0x00
#define PHY_START_CABLE_DIAG 0x8000
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT 0x1000
#define PHY_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
#define PHY_STAT_REVERSED_POLARITY 0x0020
#define PHY_STAT_MDIX 0x0010
#define PHY_FORCE_LINK 0x0008
#define PHY_POWER_SAVING_DISABLE 0x0004
#define PHY_REMOTE_LOOPBACK 0x0002
/* SIDER */
#define KS884X_SIDER_P 0x0400
#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
#define REG_FAMILY_ID 0x88
#define REG_CHIP_ID_41 0x8810
#define REG_CHIP_ID_42 0x8800
#define KS884X_CHIP_ID_MASK_41 0xFF10
#define KS884X_CHIP_ID_MASK 0xFFF0
#define KS884X_CHIP_ID_SHIFT 4
#define KS884X_REVISION_MASK 0x000E
#define KS884X_REVISION_SHIFT 1
#define KS8842_START 0x0001
/* Chip ID values ("_M"/"_P" variants of the 41/42/61/62 parts). */
#define CHIP_IP_41_M 0x8810
#define CHIP_IP_42_M 0x8800
#define CHIP_IP_61_M 0x8890
#define CHIP_IP_62_M 0x8880
#define CHIP_IP_41_P 0x8850
#define CHIP_IP_42_P 0x8840
#define CHIP_IP_61_P 0x88D0
#define CHIP_IP_62_P 0x88C0
/* SGCR1 */
#define KS8842_SGCR1_P 0x0402
#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
#define SWITCH_PASS_ALL 0x8000
#define SWITCH_TX_FLOW_CTRL 0x2000
#define SWITCH_RX_FLOW_CTRL 0x1000
#define SWITCH_CHECK_LENGTH 0x0800
#define SWITCH_AGING_ENABLE 0x0400
#define SWITCH_FAST_AGING 0x0200
#define SWITCH_AGGR_BACKOFF 0x0100
#define SWITCH_PASS_PAUSE 0x0008
#define SWITCH_LINK_AUTO_AGING 0x0001
/* SGCR2 */
#define KS8842_SGCR2_P 0x0404
#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
#define SWITCH_VLAN_ENABLE 0x8000
#define SWITCH_IGMP_SNOOP 0x4000
#define IPV6_MLD_SNOOP_ENABLE 0x2000
#define IPV6_MLD_SNOOP_OPTION 0x1000
#define PRIORITY_SCHEME_SELECT 0x0800
#define SWITCH_MIRROR_RX_TX 0x0100
#define UNICAST_VLAN_BOUNDARY 0x0080
#define MULTICAST_STORM_DISABLE 0x0040
#define SWITCH_BACK_PRESSURE 0x0020
#define FAIR_FLOW_CTRL 0x0010
#define NO_EXC_COLLISION_DROP 0x0008
#define SWITCH_HUGE_PACKET 0x0004
#define SWITCH_LEGAL_PACKET 0x0002
#define SWITCH_BUF_RESERVE 0x0001
/* SGCR3 */
#define KS8842_SGCR3_P 0x0406
#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
/* Broadcast storm rate is split: low 8 bits in 0xFF00, high 3 in 0x0007. */
#define BROADCAST_STORM_RATE_LO 0xFF00
#define SWITCH_REPEATER 0x0080
#define SWITCH_HALF_DUPLEX 0x0040
#define SWITCH_FLOW_CTRL 0x0020
#define SWITCH_10_MBIT 0x0010
#define SWITCH_REPLACE_NULL_VID 0x0008
#define BROADCAST_STORM_RATE_HI 0x0007
#define BROADCAST_STORM_RATE 0x07FF
/* SGCR4 */
#define KS8842_SGCR4_P 0x0408
/* SGCR5 */
#define KS8842_SGCR5_P 0x040A
#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
#define LED_MODE 0x8200
#define LED_SPEED_DUPLEX_ACT 0x0000
#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
#define LED_DUPLEX_10_100 0x0200
/* SGCR6 */
#define KS8842_SGCR6_P 0x0410
#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
#define KS8842_PRIORITY_MASK 3
#define KS8842_PRIORITY_SHIFT 2
/* SGCR7 */
#define KS8842_SGCR7_P 0x0412
#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
#define SWITCH_UNK_DEF_PORT_3 0x0004
#define SWITCH_UNK_DEF_PORT_2 0x0002
#define SWITCH_UNK_DEF_PORT_1 0x0001
/* MACAR1 */
#define KS8842_MACAR1_P 0x0470
#define KS8842_MACAR2_P 0x0472
#define KS8842_MACAR3_P 0x0474
/* Switch MAC address; byte order is swapped within each 16-bit register. */
#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
/* TOSR1 */
#define KS8842_TOSR1_P 0x0480
#define KS8842_TOSR2_P 0x0482
#define KS8842_TOSR3_P 0x0484
#define KS8842_TOSR4_P 0x0486
#define KS8842_TOSR5_P 0x0488
#define KS8842_TOSR6_P 0x048A
#define KS8842_TOSR7_P 0x0490
#define KS8842_TOSR8_P 0x0492
#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
/* P1CR1 */
#define KS8842_P1CR1_P 0x0500
#define KS8842_P1CR2_P 0x0502
#define KS8842_P1VIDR_P 0x0504
#define KS8842_P1CR3_P 0x0506
#define KS8842_P1IRCR_P 0x0508
#define KS8842_P1ERCR_P 0x050A
#define KS884X_P1SCSLMD_P 0x0510
#define KS884X_P1CR4_P 0x0512
#define KS884X_P1SR_P 0x0514
/* P2CR1 */
#define KS8842_P2CR1_P 0x0520
#define KS8842_P2CR2_P 0x0522
#define KS8842_P2VIDR_P 0x0524
#define KS8842_P2CR3_P 0x0526
#define KS8842_P2IRCR_P 0x0528
#define KS8842_P2ERCR_P 0x052A
#define KS884X_P2SCSLMD_P 0x0530
#define KS884X_P2CR4_P 0x0532
#define KS884X_P2SR_P 0x0534
/* P3CR1 */
#define KS8842_P3CR1_P 0x0540
#define KS8842_P3CR2_P 0x0542
#define KS8842_P3VIDR_P 0x0544
#define KS8842_P3CR3_P 0x0546
#define KS8842_P3IRCR_P 0x0548
#define KS8842_P3ERCR_P 0x054A
#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
/* Compute the control-register base address for a 0-based port index. */
#define PORT_CTRL_ADDR(port, addr) \
	(addr = KS8842_PORT_1_CTRL_1 + (port) * \
		(KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
#define KS8842_PORT_CTRL_1_OFFSET 0x00
#define PORT_BROADCAST_STORM 0x0080
#define PORT_DIFFSERV_ENABLE 0x0040
#define PORT_802_1P_ENABLE 0x0020
#define PORT_BASED_PRIORITY_MASK 0x0018
#define PORT_BASED_PRIORITY_BASE 0x0003
#define PORT_BASED_PRIORITY_SHIFT 3
#define PORT_BASED_PRIORITY_0 0x0000
#define PORT_BASED_PRIORITY_1 0x0008
#define PORT_BASED_PRIORITY_2 0x0010
#define PORT_BASED_PRIORITY_3 0x0018
#define PORT_INSERT_TAG 0x0004
#define PORT_REMOVE_TAG 0x0002
#define PORT_PRIO_QUEUE_ENABLE 0x0001
#define KS8842_PORT_CTRL_2_OFFSET 0x02
#define PORT_INGRESS_VLAN_FILTER 0x4000
#define PORT_DISCARD_NON_VID 0x2000
#define PORT_FORCE_FLOW_CTRL 0x1000
#define PORT_BACK_PRESSURE 0x0800
#define PORT_TX_ENABLE 0x0400
#define PORT_RX_ENABLE 0x0200
#define PORT_LEARN_DISABLE 0x0100
#define PORT_MIRROR_SNIFFER 0x0080
#define PORT_MIRROR_RX 0x0040
#define PORT_MIRROR_TX 0x0020
#define PORT_USER_PRIORITY_CEILING 0x0008
#define PORT_VLAN_MEMBERSHIP 0x0007
#define KS8842_PORT_CTRL_VID_OFFSET 0x04
#define PORT_DEFAULT_VID 0x0001
#define KS8842_PORT_CTRL_3_OFFSET 0x06
#define PORT_INGRESS_LIMIT_MODE 0x000C
#define PORT_INGRESS_ALL 0x0000
#define PORT_INGRESS_UNICAST 0x0004
#define PORT_INGRESS_MULTICAST 0x0008
#define PORT_INGRESS_BROADCAST 0x000C
#define PORT_COUNT_IFG 0x0002
#define PORT_COUNT_PREAMBLE 0x0001
#define KS8842_PORT_IN_RATE_OFFSET 0x08
#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
#define PORT_PRIORITY_RATE 0x0F
#define PORT_PRIORITY_RATE_SHIFT 4
#define KS884X_PORT_LINK_MD 0x10
#define PORT_CABLE_10M_SHORT 0x8000
#define PORT_CABLE_DIAG_RESULT 0x6000
#define PORT_CABLE_STAT_NORMAL 0x0000
#define PORT_CABLE_STAT_OPEN 0x2000
#define PORT_CABLE_STAT_SHORT 0x4000
#define PORT_CABLE_STAT_FAILED 0x6000
#define PORT_START_CABLE_DIAG 0x1000
#define PORT_FORCE_LINK 0x0800
#define PORT_POWER_SAVING_DISABLE 0x0400
#define PORT_PHY_REMOTE_LOOPBACK 0x0200
#define PORT_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PORT_CTRL_4_OFFSET 0x12
#define PORT_LED_OFF 0x8000
#define PORT_TX_DISABLE 0x4000
#define PORT_AUTO_NEG_RESTART 0x2000
#define PORT_REMOTE_FAULT_DISABLE 0x1000
#define PORT_POWER_DOWN 0x0800
#define PORT_AUTO_MDIX_DISABLE 0x0400
#define PORT_FORCE_MDIX 0x0200
#define PORT_LOOPBACK 0x0100
#define PORT_AUTO_NEG_ENABLE 0x0080
#define PORT_FORCE_100_MBIT 0x0040
#define PORT_FORCE_FULL_DUPLEX 0x0020
#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
#define PORT_AUTO_NEG_100BTX_FD 0x0008
#define PORT_AUTO_NEG_100BTX 0x0004
#define PORT_AUTO_NEG_10BT_FD 0x0002
#define PORT_AUTO_NEG_10BT 0x0001
#define KS884X_PORT_STATUS_OFFSET 0x14
#define PORT_HP_MDIX 0x8000
#define PORT_REVERSED_POLARITY 0x2000
#define PORT_RX_FLOW_CTRL 0x0800
#define PORT_TX_FLOW_CTRL 0x1000
#define PORT_STATUS_SPEED_100MBIT 0x0400
#define PORT_STATUS_FULL_DUPLEX 0x0200
#define PORT_REMOTE_FAULT 0x0100
#define PORT_MDIX_STATUS 0x0080
#define PORT_AUTO_NEG_COMPLETE 0x0040
#define PORT_STATUS_LINK_GOOD 0x0020
#define PORT_REMOTE_SYM_PAUSE 0x0010
#define PORT_REMOTE_100BTX_FD 0x0008
#define PORT_REMOTE_100BTX 0x0004
#define PORT_REMOTE_10BT_FD 0x0002
#define PORT_REMOTE_10BT 0x0001
/*
 * Static MAC table entry layout (full 66-bit hardware word shown as
 * byte-hex-hex groups; the masks below address the upper 32 bits):
 *
 * #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
 * #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
 * #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
 * #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
 * #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
 * #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
 */
#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
#define STATIC_MAC_TABLE_VALID 0x00080000
#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
#define STATIC_MAC_TABLE_USE_FID 0x00200000
#define STATIC_MAC_TABLE_FID 0x03C00000
#define STATIC_MAC_FWD_PORTS_SHIFT 16
#define STATIC_MAC_FID_SHIFT 22
/*
 * VLAN table entry layout:
 *
 * #define VLAN_TABLE_VID 00-00000000-00000FFF
 * #define VLAN_TABLE_FID 00-00000000-0000F000
 * #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
 * #define VLAN_TABLE_VALID 00-00000000-00080000
 */
#define VLAN_TABLE_VID 0x00000FFF
#define VLAN_TABLE_FID 0x0000F000
#define VLAN_TABLE_MEMBERSHIP 0x00070000
#define VLAN_TABLE_VALID 0x00080000
#define VLAN_TABLE_FID_SHIFT 12
#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
/*
 * Dynamic MAC table entry layout:
 *
 * #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
 * #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
 * #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
 * #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
 * #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
 * #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
 * #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
 * #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
 */
#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
#define DYNAMIC_MAC_TABLE_FID 0x000F0000
#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
/* The entry count spills into the top byte (the "_H" masks below). */
#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
#define DYNAMIC_MAC_TABLE_RESERVED 0x78
#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
#define DYNAMIC_MAC_FID_SHIFT 16
#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
#define DYNAMIC_MAC_ENTRIES_SHIFT 24
#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
/*
 * MIB counter read word layout:
 *
 * #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
 * #define MIB_COUNTER_VALID 00-00000000-40000000
 * #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
 */
#define MIB_COUNTER_VALUE 0x3FFFFFFF
#define MIB_COUNTER_VALID 0x40000000
#define MIB_COUNTER_OVERFLOW 0x80000000
  616. #define MIB_PACKET_DROPPED 0x0000FFFF
  617. #define KS_MIB_PACKET_DROPPED_TX_0 0x100
  618. #define KS_MIB_PACKET_DROPPED_TX_1 0x101
  619. #define KS_MIB_PACKET_DROPPED_TX 0x102
  620. #define KS_MIB_PACKET_DROPPED_RX_0 0x103
  621. #define KS_MIB_PACKET_DROPPED_RX_1 0x104
  622. #define KS_MIB_PACKET_DROPPED_RX 0x105
  623. /* Change default LED mode. */
  624. #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
  625. #define MAC_ADDR_LEN 6
  626. #define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
  627. #define MAX_ETHERNET_BODY_SIZE 1500
  628. #define ETHERNET_HEADER_SIZE 14
  629. #define MAX_ETHERNET_PACKET_SIZE \
  630. (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
  631. #define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
  632. #define MAX_RX_BUF_SIZE (1912 + 4)
  633. #define ADDITIONAL_ENTRIES 16
  634. #define MAX_MULTICAST_LIST 32
  635. #define HW_MULTICAST_SIZE 8
  636. #define HW_TO_DEV_PORT(port) (port - 1)
/* Link/media connection states. */
enum {
	media_connected,
	media_disconnected
};
  641. enum {
  642. OID_COUNTER_UNKOWN,
  643. OID_COUNTER_FIRST,
  644. /* total transmit errors */
  645. OID_COUNTER_XMIT_ERROR,
  646. /* total receive errors */
  647. OID_COUNTER_RCV_ERROR,
  648. OID_COUNTER_LAST
  649. };
/*
 * Hardware descriptor definitions
 */
#define DESC_ALIGNMENT 16
#define BUFFER_ALIGNMENT 8
#define NUM_OF_RX_DESC 64
#define NUM_OF_TX_DESC 64
/* Receive descriptor status bits (see struct ksz_desc_rx_stat below). */
#define KS_DESC_RX_FRAME_LEN 0x000007FF
#define KS_DESC_RX_FRAME_TYPE 0x00008000
#define KS_DESC_RX_ERROR_CRC 0x00010000
#define KS_DESC_RX_ERROR_RUNT 0x00020000
#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
#define KS_DESC_RX_ERROR_PHY 0x00080000
#define KS884X_DESC_RX_PORT_MASK 0x00300000
#define KS_DESC_RX_MULTICAST 0x01000000
#define KS_DESC_RX_ERROR 0x02000000
#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
#define KS_DESC_RX_LAST 0x20000000
#define KS_DESC_RX_FIRST 0x40000000
/* Any of these conditions marks the received frame as bad. */
#define KS_DESC_RX_ERROR_COND \
	(KS_DESC_RX_ERROR_CRC | \
	KS_DESC_RX_ERROR_RUNT | \
	KS_DESC_RX_ERROR_PHY | \
	KS_DESC_RX_ERROR_TOO_LONG)
#define KS_DESC_HW_OWNED 0x80000000
/* Descriptor buffer control bits (see the *_buf structs below). */
#define KS_DESC_BUF_SIZE 0x000007FF
#define KS884X_DESC_TX_PORT_MASK 0x00300000
#define KS_DESC_END_OF_RING 0x02000000
#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
#define KS_DESC_TX_LAST 0x20000000
#define KS_DESC_TX_FIRST 0x40000000
#define KS_DESC_TX_INTERRUPT 0x80000000
#define KS_DESC_PORT_SHIFT 20
#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
#define KS_DESC_TX_MASK \
	(KS_DESC_TX_INTERRUPT | \
	KS_DESC_TX_FIRST | \
	KS_DESC_TX_LAST | \
	KS_DESC_TX_CSUM_GEN_IP | \
	KS_DESC_TX_CSUM_GEN_TCP | \
	KS_DESC_TX_CSUM_GEN_UDP | \
	KS_DESC_BUF_SIZE)
/*
 * Receive descriptor status word as a bitfield overlay of the 32-bit
 * control value; the fields correspond to the KS_DESC_RX_* masks above.
 * The two orderings keep the in-memory word identical to the hardware
 * layout for both big- and little-endian bitfield conventions.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;
	u32 reserved1:4;
	u32 frame_type:1;
	u32 err_crc:1;
	u32 err_runt:1;
	u32 err_too_long:1;
	u32 err_phy:1;
	u32 src_port:4;
	u32 multicast:1;
	u32 error:1;
	u32 csum_err_udp:1;
	u32 csum_err_tcp:1;
	u32 csum_err_ip:1;
	u32 last_desc:1;
	u32 first_desc:1;
	u32 hw_owned:1;
#endif
};
/*
 * Transmit descriptor status word; only the ownership bit
 * (KS_DESC_HW_OWNED) is meaningful.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;
#endif
};
/*
 * Receive descriptor buffer word: buffer size plus the end-of-ring
 * marker (KS_DESC_BUF_SIZE / KS_DESC_END_OF_RING).
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:14;
	u32 end_of_ring:1;
	u32 reserved4:6;
#endif
};
/*
 * Transmit descriptor buffer word: buffer size, destination port,
 * segment markers, checksum generation flags and interrupt request
 * (see the KS_DESC_TX_* masks above).
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:9;
	u32 dest_port:4;
	u32 reserved4:1;
	u32 end_of_ring:1;
	u32 csum_gen_udp:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_ip:1;
	u32 last_seg:1;
	u32 first_seg:1;
	u32 intr:1;
#endif
};
/* Descriptor status word, accessible as bitfields or as a raw u32. */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
/* Descriptor buffer word, accessible as bitfields or as a raw u32. */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};
/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};
/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual len used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};
/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 * manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 * memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};
/* Accessor for the DMA buffer bookkeeping inside a descriptor. */
#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};
/*
 * KSZ8842 switch definitions
 */
/* Switch internal table identifiers. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
#define LEARNED_MAC_TABLE_ENTRIES 1024
#define STATIC_MAC_TABLE_ENTRIES 8
/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr: MAC address to filter.
 * @vid: VID value.
 * @fid: FID value.
 * @ports: Port membership.
 * @override: Override setting.
 * @use_fid: FID use setting.
 * @valid: Valid setting indicating the entry is being used.
 */
struct ksz_mac_table {
	u8 mac_addr[MAC_ADDR_LEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};
#define VLAN_TABLE_ENTRIES 16
/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid: VID value.
 * @fid: FID value.
 * @member: Port membership.
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};
#define DIFFSERV_ENTRIES 64
#define PRIO_802_1P_ENTRIES 8
#define PRIO_QUEUES 4
/* Two external ports plus the host port. */
#define SWITCH_PORT_NUM 2
#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
#define HOST_MASK (1 << SWITCH_PORT_NUM)
#define PORT_MASK 7
#define MAIN_PORT 0
#define OTHER_PORT 1
#define HOST_PORT SWITCH_PORT_NUM
/* 0x20 hardware MIB counters plus the two dropped-packet pseudo counters. */
#define PORT_COUNTER_NUM 0x20
#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
#define MIB_COUNTER_RX_LO_PRIORITY 0x00
#define MIB_COUNTER_RX_HI_PRIORITY 0x01
#define MIB_COUNTER_RX_UNDERSIZE 0x02
#define MIB_COUNTER_RX_FRAGMENT 0x03
#define MIB_COUNTER_RX_OVERSIZE 0x04
#define MIB_COUNTER_RX_JABBER 0x05
#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
#define MIB_COUNTER_RX_CRC_ERR 0x07
#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
#define MIB_COUNTER_RX_CTRL_8808 0x09
#define MIB_COUNTER_RX_PAUSE 0x0A
#define MIB_COUNTER_RX_BROADCAST 0x0B
#define MIB_COUNTER_RX_MULTICAST 0x0C
#define MIB_COUNTER_RX_UNICAST 0x0D
#define MIB_COUNTER_RX_OCTET_64 0x0E
#define MIB_COUNTER_RX_OCTET_65_127 0x0F
#define MIB_COUNTER_RX_OCTET_128_255 0x10
#define MIB_COUNTER_RX_OCTET_256_511 0x11
#define MIB_COUNTER_RX_OCTET_512_1023 0x12
#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
#define MIB_COUNTER_TX_LO_PRIORITY 0x14
#define MIB_COUNTER_TX_HI_PRIORITY 0x15
#define MIB_COUNTER_TX_LATE_COLLISION 0x16
#define MIB_COUNTER_TX_PAUSE 0x17
#define MIB_COUNTER_TX_BROADCAST 0x18
#define MIB_COUNTER_TX_MULTICAST 0x19
#define MIB_COUNTER_TX_UNICAST 0x1A
#define MIB_COUNTER_TX_DEFERRED 0x1B
#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters need to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;

	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];	/* last rx/tx dropped-packet reads; see port_r_mib_pkt */
};
/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate, one entry per priority queue.
 * @tx_rate: Transmit priority rate, one entry per priority queue.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};
/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 * (bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 * Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];

	u8 br_addr[MAC_ADDR_LEN];
	u8 other_addr[MAC_ADDR_LEN];

	u8 broad_per;
	u8 member;
};

/* Divisor to convert the raw hardware transmit rate to Mbit. */
#define TX_RATE_UNIT 10000
/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
/* Maximum amount of transmit data held before forcing an interrupt. */
#define MAX_TX_HELD_SIZE 52000

/* Hardware features and bug fixes. */
#define LINK_INT_WORKING (1 << 0)
#define SMALL_PACKET_TX_BUG (1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
#define IPV6_CSUM_GEN_HACK (1 << 3)
#define RX_HUGE_FRAME (1 << 4)
#define STP_SUPPORT (1 << 8)

/* Software overrides. */
#define PAUSE_FLOW_CTRL (1 << 0)
#define FAST_AGING (1 << 1)
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 * The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;

	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;

	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;

	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;

	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;

	u8 perm_addr[MAC_ADDR_LEN];
	u8 override_addr[MAC_ADDR_LEN];
	u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;

	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];

	uint features;
	uint overrides;

	void *parent;
};
/* PHY flow control configuration values (see struct ksz_port @flow_ctrl). */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;

	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];

	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};
/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};
/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used.
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};
/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to prevent hardware from accessing.
 * @lock: Mutex lock to prevent device from accessing.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;

	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;

	spinlock_t hwlock;
	struct mutex lock;

	int (*dev_rcv)(struct dev_info *);

	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;

	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];

	int mtu;
	int opened;

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};
/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @stats: Network statistics.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;

	struct net_device_stats stats;

	struct semaphore proc_sem;
	int id;

	struct mii_if_info mii_if;
	u32 advertising;
	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};
/* Logging helpers keyed off the PCI device. */
#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)

#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"

static char version[] __devinitdata =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/* Fallback MAC address when none is programmed in the hardware. */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };

/*
 * Interrupt processing primary routines
 */
/* Acknowledge (clear) the given interrupt bits in the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
/*
 * Disable all hardware interrupts.  The current mask is remembered in
 * @intr_blocked so hw_ena_intr() can restore it; the read-back ensures the
 * write has reached the hardware and updates the cached enable value.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
  1305. static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
  1306. {
  1307. hw->intr_set = interrupt;
  1308. writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
  1309. }
/* Re-enable interrupts using the current interrupt mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
/* Remove the given bit(s) from the cached interrupt mask only; the
 * hardware enable register is not touched here.
 */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
/*
 * Turn off the given interrupt(s) both in the hardware enable register and
 * in the cached interrupt mask.
 */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.  The hardware register is only written
 * when interrupts are not currently blocked.
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;

	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
/*
 * Turn on the given interrupt(s) in the hardware enable register directly.
 * Note: unlike hw_turn_on_intr(), the cached interrupt mask is not updated.
 */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}
  1348. static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
  1349. {
  1350. *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
  1351. *status = *status & hw->intr_set;
  1352. }
  1353. static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
  1354. {
  1355. if (interrupt)
  1356. hw_ena_intr(hw);
  1357. }
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.  If
 * interrupts are already blocked, zero is returned so that a later
 * hw_restore_intr() is a no-op.
 *
 * Return the current interrupt enable mask.
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
/*
 * Hardware descriptor routines
 */

/* Write @status to the hardware descriptor control word with the
 * hardware-owned bit cleared.  @status is passed by value, so the caller's
 * copy is not modified.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
/* Hand the descriptor over to the hardware.  The buffer word is only
 * rewritten when it changed; the control word (with the hw_owned bit set)
 * is written last so the hardware sees a complete descriptor.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
/* Take the next receive descriptor from the ring.  Advances the ring's
 * last index (wrapping via the ring mask), consumes one available slot and
 * clears the descriptor's receive status bits.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}
/* Program the receive buffer DMA address into the hardware descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the receive buffer size in the software descriptor copy. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
/* Take the next transmit descriptor from the ring.  Advances the ring's
 * next index (wrapping via the ring mask), consumes one available slot and
 * clears the descriptor's transmit control bits.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}
/* Program the transmit buffer DMA address into the hardware descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the transmit buffer size in the software descriptor copy. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
  1425. /* Switch functions */
  1426. #define TABLE_READ 0x10
  1427. #define TABLE_SEL_SHIFT 2
  1428. #define HW_DELAY(hw, reg) \
  1429. do { \
  1430. u16 dummy; \
  1431. dummy = readw(hw->io + reg); \
  1432. } while (0)
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;

	/* Table selector and read command go in the high byte. */
	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;

	interrupt = hw_block_intr(hw);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Data registers must be loaded before the access command. */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_sta_mac_table - write to the static MAC table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @mac_addr: The MAC address.
 * @ports: The port members.
 * @override: The flag to override the port receive/transmit settings.
 * @valid: The flag to indicate entry is valid.
 * @use_fid: The flag to indicate the FID is valid.
 * @fid: The FID value.
 *
 * This routine writes an entry of the static MAC table of the switch. It
 * calls sw_w_table_64() to write the data.
 */
static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
	u8 ports, int override, int valid, int use_fid, u8 fid)
{
	u32 data_hi;
	u32 data_lo;

	/* Pack the 6-byte MAC address into the low word (last 4 bytes) and
	 * the bottom of the high word (first 2 bytes).
	 */
	data_lo = ((u32) mac_addr[2] << 24) |
		((u32) mac_addr[3] << 16) |
		((u32) mac_addr[4] << 8) | mac_addr[5];
	data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;

	if (override)
		data_hi |= STATIC_MAC_TABLE_OVERRIDE;
	if (use_fid) {
		data_hi |= STATIC_MAC_TABLE_USE_FID;
		data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
	}
	if (valid)
		data_hi |= STATIC_MAC_TABLE_VALID;

	sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
}
/**
 * sw_r_vlan_table - read from the VLAN table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
 * @member: Buffer to store the port membership.
 *
 * This function reads an entry of the VLAN table of the switch. It calls
 * sw_r_table() to get the data.
 *
 * Return 0 if the entry is valid; otherwise -1.
 */
static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
	u8 *member)
{
	u32 data;

	sw_r_table(hw, TABLE_VLAN, addr, &data);
	if (data & VLAN_TABLE_VALID) {
		*vid = (u16)(data & VLAN_TABLE_VID);
		*fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
		*member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
			VLAN_TABLE_MEMBERSHIP_SHIFT);
		return 0;
	}
	return -1;
}
  1539. /**
  1540. * port_r_mib_cnt - read MIB counter
  1541. * @hw: The hardware instance.
  1542. * @port: The port index.
  1543. * @addr: The address of the counter.
  1544. * @cnt: Buffer to store the counter.
  1545. *
  1546. * This routine reads a MIB counter of the port.
  1547. * Hardware interrupts are disabled to minimize corruption of read data.
  1548. */
  1549. static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
  1550. {
  1551. u32 data;
  1552. u16 ctrl_addr;
  1553. uint interrupt;
  1554. int timeout;
  1555. ctrl_addr = addr + PORT_COUNTER_NUM * port;
  1556. interrupt = hw_block_intr(hw);
  1557. ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
  1558. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1559. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1560. for (timeout = 100; timeout > 0; timeout--) {
  1561. data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
  1562. if (data & MIB_COUNTER_VALID) {
  1563. if (data & MIB_COUNTER_OVERFLOW)
  1564. *cnt += MIB_COUNTER_VALUE + 1;
  1565. *cnt += data & MIB_COUNTER_VALUE;
  1566. break;
  1567. }
  1568. }
  1569. hw_restore_intr(hw, interrupt);
  1570. }
  1571. /**
  1572. * port_r_mib_pkt - read dropped packet counts
  1573. * @hw: The hardware instance.
  1574. * @port: The port index.
  1575. * @cnt: Buffer to store the receive and transmit dropped packet counts.
  1576. *
  1577. * This routine reads the dropped packet counts of the port.
  1578. * Hardware interrupts are disabled to minimize corruption of read data.
  1579. */
  1580. static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
  1581. {
  1582. u32 cur;
  1583. u32 data;
  1584. u16 ctrl_addr;
  1585. uint interrupt;
  1586. int index;
  1587. index = KS_MIB_PACKET_DROPPED_RX_0 + port;
  1588. do {
  1589. interrupt = hw_block_intr(hw);
  1590. ctrl_addr = (u16) index;
  1591. ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
  1592. << 8);
  1593. writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
  1594. HW_DELAY(hw, KS884X_IACR_OFFSET);
  1595. data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
  1596. hw_restore_intr(hw, interrupt);
  1597. data &= MIB_PACKET_DROPPED;
  1598. cur = *last;
  1599. if (data != cur) {
  1600. *last = data;
  1601. if (data < cur)
  1602. data += MIB_PACKET_DROPPED + 1;
  1603. data -= cur;
  1604. *cnt += data;
  1605. }
  1606. ++last;
  1607. ++cnt;
  1608. index -= KS_MIB_PACKET_DROPPED_TX -
  1609. KS_MIB_PACKET_DROPPED_TX_0 + 1;
  1610. } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
  1611. }
  1612. /**
  1613. * port_r_cnt - read MIB counters periodically
  1614. * @hw: The hardware instance.
  1615. * @port: The port index.
  1616. *
  1617. * This routine is used to read the counters of the port periodically to avoid
  1618. * counter overflow. The hardware should be acquired first before calling this
  1619. * routine.
  1620. *
  1621. * Return non-zero when not all counters not read.
  1622. */
  1623. static int port_r_cnt(struct ksz_hw *hw, int port)
  1624. {
  1625. struct ksz_port_mib *mib = &hw->port_mib[port];
  1626. if (mib->mib_start < PORT_COUNTER_NUM)
  1627. while (mib->cnt_ptr < PORT_COUNTER_NUM) {
  1628. port_r_mib_cnt(hw, port, mib->cnt_ptr,
  1629. &mib->counter[mib->cnt_ptr]);
  1630. ++mib->cnt_ptr;
  1631. }
  1632. if (hw->mib_cnt > PORT_COUNTER_NUM)
  1633. port_r_mib_pkt(hw, port, mib->dropped,
  1634. &mib->counter[PORT_COUNTER_NUM]);
  1635. mib->cnt_ptr = 0;
  1636. return 0;
  1637. }
  1638. /**
  1639. * port_init_cnt - initialize MIB counter values
  1640. * @hw: The hardware instance.
  1641. * @port: The port index.
  1642. *
  1643. * This routine is used to initialize all counters to zero if the hardware
  1644. * cannot do it after reset.
  1645. */
  1646. static void port_init_cnt(struct ksz_hw *hw, int port)
  1647. {
  1648. struct ksz_port_mib *mib = &hw->port_mib[port];
  1649. mib->cnt_ptr = 0;
  1650. if (mib->mib_start < PORT_COUNTER_NUM)
  1651. do {
  1652. port_r_mib_cnt(hw, port, mib->cnt_ptr,
  1653. &mib->counter[mib->cnt_ptr]);
  1654. ++mib->cnt_ptr;
  1655. } while (mib->cnt_ptr < PORT_COUNTER_NUM);
  1656. if (hw->mib_cnt > PORT_COUNTER_NUM)
  1657. port_r_mib_pkt(hw, port, mib->dropped,
  1658. &mib->counter[PORT_COUNTER_NUM]);
  1659. memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  1660. mib->cnt_ptr = 0;
  1661. }
  1662. /*
  1663. * Port functions
  1664. */
  1665. /**
  1666. * port_chk - check port register bits
  1667. * @hw: The hardware instance.
  1668. * @port: The port index.
  1669. * @offset: The offset of the port register.
  1670. * @bits: The data bits to check.
  1671. *
  1672. * This function checks whether the specified bits of the port register are set
  1673. * or not.
  1674. *
  1675. * Return 0 if the bits are not set.
  1676. */
  1677. static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
  1678. {
  1679. u32 addr;
  1680. u16 data;
  1681. PORT_CTRL_ADDR(port, addr);
  1682. addr += offset;
  1683. data = readw(hw->io + addr);
  1684. return (data & bits) == bits;
  1685. }
  1686. /**
  1687. * port_cfg - set port register bits
  1688. * @hw: The hardware instance.
  1689. * @port: The port index.
  1690. * @offset: The offset of the port register.
  1691. * @bits: The data bits to set.
  1692. * @set: The flag indicating whether the bits are to be set or not.
  1693. *
  1694. * This routine sets or resets the specified bits of the port register.
  1695. */
  1696. static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
  1697. int set)
  1698. {
  1699. u32 addr;
  1700. u16 data;
  1701. PORT_CTRL_ADDR(port, addr);
  1702. addr += offset;
  1703. data = readw(hw->io + addr);
  1704. if (set)
  1705. data |= bits;
  1706. else
  1707. data &= ~bits;
  1708. writew(data, hw->io + addr);
  1709. }
  1710. /**
  1711. * port_chk_shift - check port bit
  1712. * @hw: The hardware instance.
  1713. * @port: The port index.
  1714. * @offset: The offset of the register.
  1715. * @shift: Number of bits to shift.
  1716. *
  1717. * This function checks whether the specified port is set in the register or
  1718. * not.
  1719. *
  1720. * Return 0 if the port is not set.
  1721. */
  1722. static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
  1723. {
  1724. u16 data;
  1725. u16 bit = 1 << port;
  1726. data = readw(hw->io + addr);
  1727. data >>= shift;
  1728. return (data & bit) == bit;
  1729. }
/**
 * port_cfg_shift - set port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 * @set: The flag indicating whether the port is to be set or not.
 *
 * This routine sets or resets the specified port in the register.
 */
static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
	int set)
{
	u16 data;
	u16 bits = 1 << port;

	data = readw(hw->io + addr);
	bits <<= shift;
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
  1753. /**
  1754. * port_r8 - read byte from port register
  1755. * @hw: The hardware instance.
  1756. * @port: The port index.
  1757. * @offset: The offset of the port register.
  1758. * @data: Buffer to store the data.
  1759. *
  1760. * This routine reads a byte from the port register.
  1761. */
  1762. static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
  1763. {
  1764. u32 addr;
  1765. PORT_CTRL_ADDR(port, addr);
  1766. addr += offset;
  1767. *data = readb(hw->io + addr);
  1768. }
  1769. /**
  1770. * port_r16 - read word from port register.
  1771. * @hw: The hardware instance.
  1772. * @port: The port index.
  1773. * @offset: The offset of the port register.
  1774. * @data: Buffer to store the data.
  1775. *
  1776. * This routine reads a word from the port register.
  1777. */
  1778. static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
  1779. {
  1780. u32 addr;
  1781. PORT_CTRL_ADDR(port, addr);
  1782. addr += offset;
  1783. *data = readw(hw->io + addr);
  1784. }
  1785. /**
  1786. * port_w16 - write word to port register.
  1787. * @hw: The hardware instance.
  1788. * @port: The port index.
  1789. * @offset: The offset of the port register.
  1790. * @data: Data to write.
  1791. *
  1792. * This routine writes a word to the port register.
  1793. */
  1794. static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
  1795. {
  1796. u32 addr;
  1797. PORT_CTRL_ADDR(port, addr);
  1798. addr += offset;
  1799. writew(data, hw->io + addr);
  1800. }
  1801. /**
  1802. * sw_chk - check switch register bits
  1803. * @hw: The hardware instance.
  1804. * @addr: The address of the switch register.
  1805. * @bits: The data bits to check.
  1806. *
  1807. * This function checks whether the specified bits of the switch register are
  1808. * set or not.
  1809. *
  1810. * Return 0 if the bits are not set.
  1811. */
  1812. static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
  1813. {
  1814. u16 data;
  1815. data = readw(hw->io + addr);
  1816. return (data & bits) == bits;
  1817. }
  1818. /**
  1819. * sw_cfg - set switch register bits
  1820. * @hw: The hardware instance.
  1821. * @addr: The address of the switch register.
  1822. * @bits: The data bits to set.
  1823. * @set: The flag indicating whether the bits are to be set or not.
  1824. *
  1825. * This function sets or resets the specified bits of the switch register.
  1826. */
  1827. static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
  1828. {
  1829. u16 data;
  1830. data = readw(hw->io + addr);
  1831. if (set)
  1832. data |= bits;
  1833. else
  1834. data &= ~bits;
  1835. writew(data, hw->io + addr);
  1836. }
/* Bandwidth */

static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable broadcast storm protection on port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}

static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
	/* Report whether broadcast storm protection is enabled on port @p. */
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE	10

/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE		9969

/**
 * sw_cfg_broad_storm - configure broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 */
static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u16 data;
	u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);

	if (value > BROADCAST_STORM_RATE)
		value = BROADCAST_STORM_RATE;

	/*
	 * The rate field is stored with its low and high bytes swapped in
	 * switch control register 3; see sw_get_broad_storm for the
	 * matching decode.
	 */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
	data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
}
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine retrieves the broadcast storm threshold of the switch.
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	int num;
	u16 data;

	/* Undo the byte swap applied by sw_cfg_broad_storm. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	num = (data & BROADCAST_STORM_RATE_HI);
	num <<= 8;
	num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
	/* Convert the raw rate back to percent, rounding to nearest. */
	num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
	*percent = (u8) num;
}
/**
 * sw_dis_broad_storm - disable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}
/**
 * sw_ena_broad_storm - enable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine enables the broadcast storm limit function of the switch,
 * re-programming the global threshold from the cached percentage first.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
/**
 * sw_init_broad_storm - initialize broadcast storm
 * @hw: The hardware instance.
 *
 * This routine initializes the broadcast storm limit function of the switch:
 * threshold set to 1%, the limiter disabled on every port, and multicast
 * packets excluded from storm protection.
 */
static void sw_init_broad_storm(struct ksz_hw *hw)
{
	int port;

	hw->ksz_switch->broad_per = 1;
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	for (port = 0; port < TOTAL_PORT_NUM; port++)
		sw_dis_broad_storm(hw, port);
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
}
/**
 * hw_cfg_broad_storm - configure broadcast storm
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate;
 *	clamped to 100.
 *
 * This routine configures the broadcast storm threshold of the switch.
 * It is called by user functions.  The hardware should be acquired first.
 */
static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	if (percent > 100)
		percent = 100;

	sw_cfg_broad_storm(hw, percent);
	/* Read back the quantized value actually programmed and cache it. */
	sw_get_broad_storm(hw, &percent);
	hw->ksz_switch->broad_per = percent;
}
/**
 * sw_dis_prio_rate - disable switch priority rate
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the priority rate function of the switch by
 * clearing the port's ingress rate limit register.
 */
static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_IN_RATE_OFFSET;
	writel(0, hw->io + addr);
}
  1956. /**
  1957. * sw_init_prio_rate - initialize switch prioirty rate
  1958. * @hw: The hardware instance.
  1959. *
  1960. * This routine initializes the priority rate function of the switch.
  1961. */
  1962. static void sw_init_prio_rate(struct ksz_hw *hw)
  1963. {
  1964. int port;
  1965. int prio;
  1966. struct ksz_switch *sw = hw->ksz_switch;
  1967. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  1968. for (prio = 0; prio < PRIO_QUEUES; prio++) {
  1969. sw->port_cfg[port].rx_rate[prio] =
  1970. sw->port_cfg[port].tx_rate[prio] = 0;
  1971. }
  1972. sw_dis_prio_rate(hw, port);
  1973. }
  1974. }
/* Communication */

static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
	/* Set/clear the back pressure bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}

static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
	/* Set/clear the force flow control bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}

static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}

static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}

/* Spanning Tree */

static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
{
	/* @set non-zero disables address learning on port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
}

static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}

static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}

static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}

static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
	/*
	 * Pulse fast aging for 1 ms to age out the dynamic MAC table,
	 * unless fast aging is already permanently enabled via overrides.
	 */
	if (!(hw->overrides & FAST_AGING)) {
		sw_cfg_fast_aging(hw, 1);
		mdelay(1);
		sw_cfg_fast_aging(hw, 0);
	}
}
/* VLAN - thin accessors for the per-port VLAN control bits. */

static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
{
	/* Set/clear the VLAN tag insertion bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
}

static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
{
	/* Set/clear the VLAN tag removal bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
}

static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
}

static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
}

static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
	/* Set/clear the discard non-VID packets bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}

static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
	/* Set/clear the ingress VLAN filtering bit of port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}

static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}

static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
/* Mirroring - accessors for the port mirroring control bits. */

static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
	/* Mark/unmark port @p as the mirror sniffer (monitor) port. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}

static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable mirroring of packets received on port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}

static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable mirroring of packets transmitted on port @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}

static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}

/* Turn all mirroring functions off on every port and globally. */
static void sw_init_mirror(struct ksz_hw *hw)
{
	int port;

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_cfg_mirror_sniffer(hw, port, 0);
		port_cfg_mirror_rx(hw, port, 0);
		port_cfg_mirror_tx(hw, port, 0);
	}
	sw_cfg_mirror_rx_tx(hw, 0);
}

static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
	/* Enable/disable delivery of unknown unicast to the default port. */
	sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE, set);
}

static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
	return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE);
}

static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
	/* Select/deselect @port in the unknown-default port bitmap. */
	port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}

static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
	return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
/* Priority - accessors for the per-port QoS control bits. */

static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable DiffServ (TOS) priority classification on @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}

static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable 802.1p tag priority classification on @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}

static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
	/*
	 * NOTE(review): despite the name, this toggles the
	 * PORT_USER_PRIORITY_CEILING bit; see sw_cfg_replace_vid for the
	 * intended priority re-mapping semantics.
	 */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}

static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
	/* Enable/disable multiple priority queue operation on @p. */
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}

static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}

static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}

static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}

static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
/**
 * sw_dis_diffserv - disable switch DiffServ priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the DiffServ priority function of the switch.
 */
static void sw_dis_diffserv(struct ksz_hw *hw, int port)
{
	port_cfg_diffserv(hw, port, 0);
}

/**
 * sw_dis_802_1p - disable switch 802.1p priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the 802.1p priority function of the switch.
 */
static void sw_dis_802_1p(struct ksz_hw *hw, int port)
{
	port_cfg_802_1p(hw, port, 0);
}
/**
 * sw_cfg_replace_null_vid - configure NULL VID replacement
 * @hw: The hardware instance.
 * @set: The flag to disable or enable.
 *
 * This routine controls the SWITCH_REPLACE_NULL_VID bit in switch control
 * register 3.  Presumably, when enabled, the switch replaces a null VID
 * in an ingress tag with the port's default VID -- confirm against the
 * KS8842 datasheet.
 */
static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
}
/**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
 * @hw: The hardware instance.
 * @port: The port index.
 * @set: The flag to disable or enable.
 *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * That allows 802.1p priority field to be replaced with the port's default
 * tag's priority value if the ingress packet's 802.1p priority has a higher
 * priority than port's default tag's priority.
 */
static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
{
	port_cfg_replace_vid(hw, port, set);
}
/**
 * sw_cfg_port_based - configure switch port based priority
 * @hw: The hardware instance.
 * @port: The port index.
 * @prio: The priority to set; clamped to PORT_BASED_PRIORITY_BASE.
 *
 * This routine configures the port based priority of the switch and caches
 * the value in the software port configuration.
 */
static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
{
	u16 data;

	if (prio > PORT_BASED_PRIORITY_BASE)
		prio = PORT_BASED_PRIORITY_BASE;

	hw->ksz_switch->port_cfg[port].port_prio = prio;

	/* Read-modify-write the priority field of port control 1. */
	port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
	data &= ~PORT_BASED_PRIORITY_MASK;
	data |= prio << PORT_BASED_PRIORITY_SHIFT;
	port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
}
/**
 * sw_dis_multi_queue - disable transmit multiple queues
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the transmit multiple queues selection of the switch
 * port.  Only single transmit queue on the port.
 */
static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
{
	port_cfg_prio(hw, port, 0);
}
  2232. /**
  2233. * sw_init_prio - initialize switch priority
  2234. * @hw: The hardware instance.
  2235. *
  2236. * This routine initializes the switch QoS priority functions.
  2237. */
  2238. static void sw_init_prio(struct ksz_hw *hw)
  2239. {
  2240. int port;
  2241. int tos;
  2242. struct ksz_switch *sw = hw->ksz_switch;
  2243. /*
  2244. * Init all the 802.1p tag priority value to be assigned to different
  2245. * priority queue.
  2246. */
  2247. sw->p_802_1p[0] = 0;
  2248. sw->p_802_1p[1] = 0;
  2249. sw->p_802_1p[2] = 1;
  2250. sw->p_802_1p[3] = 1;
  2251. sw->p_802_1p[4] = 2;
  2252. sw->p_802_1p[5] = 2;
  2253. sw->p_802_1p[6] = 3;
  2254. sw->p_802_1p[7] = 3;
  2255. /*
  2256. * Init all the DiffServ priority value to be assigned to priority
  2257. * queue 0.
  2258. */
  2259. for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
  2260. sw->diffserv[tos] = 0;
  2261. /* All QoS functions disabled. */
  2262. for (port = 0; port < TOTAL_PORT_NUM; port++) {
  2263. sw_dis_multi_queue(hw, port);
  2264. sw_dis_diffserv(hw, port);
  2265. sw_dis_802_1p(hw, port);
  2266. sw_cfg_replace_vid(hw, port, 0);
  2267. sw->port_cfg[port].port_prio = 0;
  2268. sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
  2269. }
  2270. sw_cfg_replace_null_vid(hw, 0);
  2271. }
/**
 * port_get_def_vid - get port default VID.
 * @hw: The hardware instance.
 * @port: The port index.
 * @vid: Buffer to store the VID.
 *
 * This routine retrieves the default VID of the port.
 */
static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_VID_OFFSET;
	*vid = readw(hw->io + addr);
}
/**
 * sw_init_vlan - initialize switch VLAN
 * @hw: The hardware instance.
 *
 * This routine initializes the VLAN function of the switch by caching the
 * device's VLAN table and each port's default VID, and resetting the
 * cached port membership to all ports.
 */
static void sw_init_vlan(struct ksz_hw *hw)
{
	int port;
	int entry;
	struct ksz_switch *sw = hw->ksz_switch;

	/* Read 16 VLAN entries from device's VLAN table. */
	for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
		sw_r_vlan_table(hw, entry,
			&sw->vlan_table[entry].vid,
			&sw->vlan_table[entry].fid,
			&sw->vlan_table[entry].member);
	}

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
		sw->port_cfg[port].member = PORT_MASK;
	}
}
/**
 * sw_cfg_port_base_vlan - configure port-based VLAN membership
 * @hw: The hardware instance.
 * @port: The port index.
 * @member: The port-based VLAN membership.
 *
 * This routine configures the port-based VLAN membership of the port and
 * caches the value in the software port configuration.
 */
static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
{
	u32 addr;
	u8 data;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_2_OFFSET;

	/* Replace only the membership bits; preserve the rest. */
	data = readb(hw->io + addr);
	data &= ~PORT_VLAN_MEMBERSHIP;
	data |= (member & PORT_MASK);
	writeb(data, hw->io + addr);

	hw->ksz_switch->port_cfg[port].member = member;
}
/**
 * sw_get_addr - get the switch MAC address.
 * @hw: The hardware instance.
 * @mac_addr: Buffer to store the MAC address (6 bytes).
 *
 * This function retrieves the MAC address of the switch.  Each iteration
 * reads one byte from each of the two interleaved MAC address register
 * banks -- presumably matching the device's byte layout; see sw_set_addr
 * for the mirror-image write.
 */
static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}
/**
 * sw_set_addr - configure switch MAC address
 * @hw: The hardware instance.
 * @mac_addr: The MAC address (6 bytes).
 *
 * This function configures the MAC address of the switch, writing byte
 * pairs to the two interleaved MAC address register banks (the inverse
 * of sw_get_addr).
 */
static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}
/**
 * sw_set_global_ctrl - set switch global control
 * @hw: The hardware instance.
 *
 * This routine sets the global control of the switch function.
 */
static void sw_set_global_ctrl(struct ksz_hw *hw)
{
	u16 data;

	/* Enable switch MII flow control. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data |= SWITCH_FLOW_CTRL;
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	/* Enable aggressive back off algorithm in half duplex mode. */
	data |= SWITCH_AGGR_BACKOFF;

	/* Enable automatic fast aging when link changed detected. */
	data |= SWITCH_AGING_ENABLE;
	data |= SWITCH_LINK_AUTO_AGING;

	/* FAST_AGING override keeps fast aging permanently on. */
	if (hw->overrides & FAST_AGING)
		data |= SWITCH_FAST_AGING;
	else
		data &= ~SWITCH_FAST_AGING;
	writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);

	/* Enable no excessive collision drop. */
	data |= NO_EXC_COLLISION_DROP;
	writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
}
/*
 * Spanning tree states of a switch port; see port_set_stp_state for the
 * exact rx/tx/learn bit combination each state programs.
 */
enum {
	STP_STATE_DISABLED = 0,	/* No rx/tx, learning disabled. */
	STP_STATE_LISTENING,	/* Rx only, learning disabled. */
	STP_STATE_LEARNING,	/* Rx only, learning enabled. */
	STP_STATE_FORWARDING,	/* Rx/tx enabled, learning enabled. */
	STP_STATE_BLOCKED,	/* No rx/tx, learning disabled. */
	STP_STATE_SIMPLE	/* Rx/tx enabled, learning disabled. */
};
  2397. /**
  2398. * port_set_stp_state - configure port spanning tree state
  2399. * @hw: The hardware instance.
  2400. * @port: The port index.
  2401. * @state: The spanning tree state.
  2402. *
  2403. * This routine configures the spanning tree state of the port.
  2404. */
  2405. static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
  2406. {
  2407. u16 data;
  2408. port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
  2409. switch (state) {
  2410. case STP_STATE_DISABLED:
  2411. data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
  2412. data |= PORT_LEARN_DISABLE;
  2413. break;
  2414. case STP_STATE_LISTENING:
  2415. /*
  2416. * No need to turn on transmit because of port direct mode.
  2417. * Turning on receive is required if static MAC table is not setup.
  2418. */
  2419. data &= ~PORT_TX_ENABLE;
  2420. data |= PORT_RX_ENABLE;
  2421. data |= PORT_LEARN_DISABLE;
  2422. break;
  2423. case STP_STATE_LEARNING:
  2424. data &= ~PORT_TX_ENABLE;
  2425. data |= PORT_RX_ENABLE;
  2426. data &= ~PORT_LEARN_DISABLE;
  2427. break;
  2428. case STP_STATE_FORWARDING:
  2429. data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
  2430. data &= ~PORT_LEARN_DISABLE;
  2431. break;
  2432. case STP_STATE_BLOCKED:
  2433. /*
  2434. * Need to setup static MAC table with override to keep receiving BPDU
  2435. * messages. See sw_init_stp routine.
  2436. */
  2437. data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
  2438. data |= PORT_LEARN_DISABLE;
  2439. break;
  2440. case STP_STATE_SIMPLE:
  2441. data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
  2442. data |= PORT_LEARN_DISABLE;
  2443. break;
  2444. }
  2445. port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
  2446. hw->ksz_switch->port_cfg[port].stp_state = state;
  2447. }
/* Well-known indices into the software static MAC table cache. */
#define STP_ENTRY			0
#define BROADCAST_ENTRY			1
#define BRIDGE_ADDR_ENTRY		2
#define IPV6_ADDR_ENTRY			3

/**
 * sw_clr_sta_mac_table - clear static MAC table
 * @hw: The hardware instance.
 *
 * This routine clears the static MAC table by rewriting every cached entry
 * with valid forced to 0 (the other fields are preserved as cached).
 */
static void sw_clr_sta_mac_table(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, 0,
			entry->use_fid, entry->fid);
	}
}
/**
 * sw_init_stp - initialize switch spanning tree support
 * @hw: The hardware instance.
 *
 * This routine initializes the spanning tree support of the switch by
 * installing a static MAC table entry for 01:80:C2:00:00:00 (the bridge
 * group address used by STP BPDUs) with override set, so BPDUs still
 * reach the host port when a port is blocked.
 */
static void sw_init_stp(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;

	entry = &hw->ksz_switch->mac_table[STP_ENTRY];
	entry->mac_addr[0] = 0x01;
	entry->mac_addr[1] = 0x80;
	entry->mac_addr[2] = 0xC2;
	entry->mac_addr[3] = 0x00;
	entry->mac_addr[4] = 0x00;
	entry->mac_addr[5] = 0x00;
	entry->ports = HOST_MASK;
	entry->override = 1;
	entry->valid = 1;
	sw_w_sta_mac_table(hw, STP_ENTRY,
		entry->mac_addr, entry->ports,
		entry->override, entry->valid,
		entry->use_fid, entry->fid);
}
/**
 * sw_block_addr - block certain packets from the host port
 * @hw: The hardware instance.
 *
 * This routine blocks certain packets from reaching to the host port by
 * invalidating the broadcast, bridge address and IPv6 address static MAC
 * table entries.
 */
static void sw_block_addr(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		entry->valid = 0;
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, entry->valid,
			entry->use_fid, entry->fid);
	}
}
/* All link modes the PHY can advertise during auto-negotiation. */
#define PHY_LINK_SUPPORT		\
	(PHY_AUTO_NEG_ASYM_PAUSE |	\
	PHY_AUTO_NEG_SYM_PAUSE |	\
	PHY_AUTO_NEG_100BT4 |		\
	PHY_AUTO_NEG_100BTX_FD |	\
	PHY_AUTO_NEG_100BTX |		\
	PHY_AUTO_NEG_10BT_FD |		\
	PHY_AUTO_NEG_10BT)

/*
 * Thin accessors for the memory-mapped PHY registers; @phy is the
 * byte offset of the PHY's register bank within the chip's I/O space.
 */

static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}

static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}

static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}

static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
/**
 * hw_r_phy - read data from PHY register
 * @hw: The hardware instance.
 * @port: Port to read.
 * @reg: PHY register to read.
 * @val: Buffer to store the read data.
 *
 * This routine reads data from the PHY register of the given port.
 */
static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	*val = readw(hw->io + phy);
}
/**
 * hw_w_phy - write data to PHY register
 * @hw: The hardware instance.
 * @port: Port to write.
 * @reg: PHY register to write.
 * @val: Word data to write.
 *
 * This routine writes data to the PHY register of the given port.
 */
static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	writew(val, hw->io + phy);
}
/*
 * EEPROM access functions
 *
 * The AT93C46 serial EEPROM is bit-banged through GPIO bits in the
 * EEPROM control register.
 */

#define AT93C_CODE			0
#define AT93C_WR_OFF			0x00
#define AT93C_WR_ALL			0x10
#define AT93C_ER_ALL			0x20
#define AT93C_WR_ON			0x30

#define AT93C_WRITE			1
#define AT93C_READ			2
#define AT93C_ERASE			3

/* Delay between bit transitions, in microseconds. */
#define EEPROM_DELAY			4

static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	/* Drive the given EEPROM GPIO line(s) low. */
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	/* Drive the given EEPROM GPIO line(s) high. */
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	/* Return the non-zero mask bits if the GPIO line(s) read high. */
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}

static void eeprom_clk(struct ksz_hw *hw)
{
	/* Generate one full serial clock pulse. */
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
  2638. static u16 spi_r(struct ksz_hw *hw)
  2639. {
  2640. int i;
  2641. u16 temp = 0;
  2642. for (i = 15; i >= 0; i--) {
  2643. raise_gpio(hw, EEPROM_SERIAL_CLOCK);
  2644. udelay(EEPROM_DELAY);
  2645. temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
  2646. drop_gpio(hw, EEPROM_SERIAL_CLOCK);
  2647. udelay(EEPROM_DELAY);
  2648. }
  2649. return temp;
  2650. }
  2651. static void spi_w(struct ksz_hw *hw, u16 data)
  2652. {
  2653. int i;
  2654. for (i = 15; i >= 0; i--) {
  2655. (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2656. drop_gpio(hw, EEPROM_DATA_OUT);
  2657. eeprom_clk(hw);
  2658. }
  2659. }
  2660. static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
  2661. {
  2662. int i;
  2663. /* Initial start bit */
  2664. raise_gpio(hw, EEPROM_DATA_OUT);
  2665. eeprom_clk(hw);
  2666. /* AT93C operation */
  2667. for (i = 1; i >= 0; i--) {
  2668. (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2669. drop_gpio(hw, EEPROM_DATA_OUT);
  2670. eeprom_clk(hw);
  2671. }
  2672. /* Address location */
  2673. for (i = 5; i >= 0; i--) {
  2674. (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
  2675. drop_gpio(hw, EEPROM_DATA_OUT);
  2676. eeprom_clk(hw);
  2677. }
  2678. }
/* Word indices of the fields stored in the EEPROM. */
#define EEPROM_DATA_RESERVED		0
#define EEPROM_DATA_MAC_ADDR_0		1
#define EEPROM_DATA_MAC_ADDR_1		2
#define EEPROM_DATA_MAC_ADDR_2		3
#define EEPROM_DATA_SUBSYS_ID		4
#define EEPROM_DATA_SUBSYS_VEN_ID	5
#define EEPROM_DATA_PM_CAP		6

/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR	9

/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
	u16 data;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	spi_reg(hw, AT93C_READ, reg);
	data = spi_r(hw);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	return data;
}
/**
 * eeprom_write - write to AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 * @data: The data value.
 *
 * This procedure writes a word to the AT93C46 EEPROM.  Each step follows
 * the AT93C command protocol: write-enable, erase, write, write-disable,
 * with the chip deselected briefly between commands.
 */
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
	int timeout;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	/* Enable write. */
	spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Erase the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_ERASE, reg);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/*
	 * Check operation complete.  Poll DATA_IN until it reads high
	 * (ready indication) or about 10 ms have elapsed.
	 */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Write the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_WRITE, reg);
	spi_w(hw, data);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete (same ready poll as above). */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Disable write. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}
  2756. /*
  2757. * Link detection routines
  2758. */
  2759. static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
  2760. {
  2761. ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
  2762. switch (port->flow_ctrl) {
  2763. case PHY_FLOW_CTRL:
  2764. ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
  2765. break;
  2766. /* Not supported. */
  2767. case PHY_TX_ONLY:
  2768. case PHY_RX_ONLY:
  2769. default:
  2770. break;
  2771. }
  2772. return ctrl;
  2773. }
  2774. static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
  2775. {
  2776. u32 rx_cfg;
  2777. u32 tx_cfg;
  2778. rx_cfg = hw->rx_cfg;
  2779. tx_cfg = hw->tx_cfg;
  2780. if (rx)
  2781. hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
  2782. else
  2783. hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
  2784. if (tx)
  2785. hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
  2786. else
  2787. hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
  2788. if (hw->enabled) {
  2789. if (rx_cfg != hw->rx_cfg)
  2790. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  2791. if (tx_cfg != hw->tx_cfg)
  2792. writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
  2793. }
  2794. }
/*
 * Resolve the rx/tx pause configuration from the local and remote
 * auto-negotiation pause advertisements and apply it to the MAC DMA.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	/* User-forced flow control overrides the negotiated result. */
	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	/* Resolve pause usage from the advertisement bit combinations. */
	if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
		if (local & PHY_AUTO_NEG_SYM_PAUSE) {
			rx = tx = 1;
		} else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
				(local & PHY_AUTO_NEG_PAUSE) ==
				PHY_AUTO_NEG_ASYM_PAUSE) {
			tx = 1;
		}
	} else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
		if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
			rx = 1;
	}
	/* With the switch present the MAC DMA setting is left alone. */
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
/*
 * Apply the half-duplex signal bug workaround after a link change:
 * transmit flow control must be off while the port runs half duplex.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
			!(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of the
 * switch ports.
 */
static void port_get_link_speed(struct ksz_port *port)
{
	uint interrupt;
	struct ksz_port_info *info;
	struct ksz_port_info *linked = NULL;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 status;
	u8 local;
	u8 remote;
	int i;
	int p;
	int change = 0;

	/* Block interrupts while the per-port status is sampled. */
	interrupt = hw_block_intr(hw);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/*
		 * Link status is changing all the time even when there is no
		 * cable connection!
		 */
		remote = status & (PORT_AUTO_NEG_COMPLETE |
			PORT_STATUS_LINK_GOOD);
		local = (u8) data;

		/* No change to status. */
		if (local == info->advertised && remote == info->partner)
			continue;

		info->advertised = local;
		info->partner = remote;
		if (status & PORT_STATUS_LINK_GOOD) {

			/* Remember the first linked port. */
			if (!linked)
				linked = info;

			/* Default to 10 Mbit; upgrade if 100 Mbit reported. */
			info->tx_rate = 10 * TX_RATE_UNIT;
			if (status & PORT_STATUS_SPEED_100MBIT)
				info->tx_rate = 100 * TX_RATE_UNIT;

			/* 1 = half duplex, 2 = full duplex. */
			info->duplex = 1;
			if (status & PORT_STATUS_FULL_DUPLEX)
				info->duplex = 2;

			if (media_connected != info->state) {
				/* Link just came up: resolve flow control
				 * from the advertised capabilities.
				 */
				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
					&data);
				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
					&status);
				determine_flow_ctrl(hw, port, data, status);
				if (hw->ksz_switch) {
					port_cfg_back_pressure(hw, p,
						(1 == info->duplex));
				}
				change |= 1 << i;
				port_cfg_change(hw, port, info, status);
			}
			info->state = media_connected;
		} else {
			if (media_disconnected != info->state) {
				change |= 1 << i;

				/* Indicate the link just goes down. */
				hw->port_mib[p].link_down = 1;
			}
			info->state = media_disconnected;
		}
		hw->port_mib[p].state = (u8) info->state;
	}

	/* Move the logical link to a live port if the tracked one is down. */
	if (linked && media_disconnected == port->linked->state)
		port->linked = linked;

	hw_restore_intr(hw, interrupt);
}
  2908. #define PHY_RESET_TIMEOUT 10
/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_port_info *info;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];

		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/* Remember the current setting only when the link is up, so
		 * that auto-negotiation restarts if the setting must change.
		 */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;

		/* Start from advertising everything. */
		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);

		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;

		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);

			/* 1 = half duplex, 2 = full duplex. */
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}
  2956. /**
  2957. * port_force_link_speed - force port speed
  2958. * @port: The port instance.
  2959. *
  2960. * This routine forces the link speed of the switch ports.
  2961. */
  2962. static void port_force_link_speed(struct ksz_port *port)
  2963. {
  2964. struct ksz_hw *hw = port->hw;
  2965. u16 data;
  2966. int i;
  2967. int phy;
  2968. int p;
  2969. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
  2970. phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
  2971. hw_r_phy_ctrl(hw, phy, &data);
  2972. data &= ~PHY_AUTO_NEG_ENABLE;
  2973. if (10 == port->speed)
  2974. data &= ~PHY_SPEED_100MBIT;
  2975. else if (100 == port->speed)
  2976. data |= PHY_SPEED_100MBIT;
  2977. if (1 == port->duplex)
  2978. data &= ~PHY_FULL_DUPLEX;
  2979. else if (2 == port->duplex)
  2980. data |= PHY_FULL_DUPLEX;
  2981. hw_w_phy_ctrl(hw, phy, data);
  2982. }
  2983. }
/* Enable or disable the power-down bit on each port of the interface. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
  2993. /*
  2994. * KSZ8841 power management functions
  2995. */
  2996. /**
  2997. * hw_chk_wol_pme_status - check PMEN pin
  2998. * @hw: The hardware instance.
  2999. *
  3000. * This function is used to check PMEN pin is asserted.
  3001. *
  3002. * Return 1 if PMEN pin is asserted; otherwise, 0.
  3003. */
  3004. static int hw_chk_wol_pme_status(struct ksz_hw *hw)
  3005. {
  3006. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  3007. struct pci_dev *pdev = hw_priv->pdev;
  3008. u16 data;
  3009. if (!pdev->pm_cap)
  3010. return 0;
  3011. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  3012. return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
  3013. }
  3014. /**
  3015. * hw_clr_wol_pme_status - clear PMEN pin
  3016. * @hw: The hardware instance.
  3017. *
  3018. * This routine is used to clear PME_Status to deassert PMEN pin.
  3019. */
  3020. static void hw_clr_wol_pme_status(struct ksz_hw *hw)
  3021. {
  3022. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  3023. struct pci_dev *pdev = hw_priv->pdev;
  3024. u16 data;
  3025. if (!pdev->pm_cap)
  3026. return;
  3027. /* Clear PME_Status to deassert PMEN pin. */
  3028. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  3029. data |= PCI_PM_CTRL_PME_STATUS;
  3030. pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
  3031. }
  3032. /**
  3033. * hw_cfg_wol_pme - enable or disable Wake-on-LAN
  3034. * @hw: The hardware instance.
  3035. * @set: The flag indicating whether to enable or disable.
  3036. *
  3037. * This routine is used to enable or disable Wake-on-LAN.
  3038. */
  3039. static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
  3040. {
  3041. struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
  3042. struct pci_dev *pdev = hw_priv->pdev;
  3043. u16 data;
  3044. if (!pdev->pm_cap)
  3045. return;
  3046. pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
  3047. data &= ~PCI_PM_CTRL_STATE_MASK;
  3048. if (set)
  3049. data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
  3050. else
  3051. data &= ~PCI_PM_CTRL_PME_ENABLE;
  3052. pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
  3053. }
  3054. /**
  3055. * hw_cfg_wol - configure Wake-on-LAN features
  3056. * @hw: The hardware instance.
  3057. * @frame: The pattern frame bit.
  3058. * @set: The flag indicating whether to enable or disable.
  3059. *
  3060. * This routine is used to enable or disable certain Wake-on-LAN features.
  3061. */
  3062. static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
  3063. {
  3064. u16 data;
  3065. data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
  3066. if (set)
  3067. data |= frame;
  3068. else
  3069. data &= ~frame;
  3070. writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
  3071. }
/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index.
 * @mask_size: The size of the mask.
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine is used to program Wake-on-LAN pattern.
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
	u8 *mask, uint frame_size, u8 *pattern)
{
	int bits;
	int from;
	int len;
	int to;
	u32 crc;
	u8 data[64];
	u8 val = 0;

	/* Each mask bit covers one pattern byte; hardware limit is 64. */
	if (frame_size > mask_size * 8)
		frame_size = mask_size * 8;
	if (frame_size > 64)
		frame_size = 64;

	/* Each frame occupies a 0x10-byte register window. */
	i *= 0x10;
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);

	/*
	 * Walk the mask bit by bit, writing each mask byte to the hardware
	 * and collecting the pattern bytes selected by set mask bits into
	 * data[] for the CRC calculation below.
	 */
	bits = len = from = to = 0;
	do {
		if (bits) {
			if ((val & 1))
				data[to++] = pattern[from];
			val >>= 1;
			++from;
			--bits;
		} else {
			val = mask[len];
			writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
				+ len);
			++len;
			if (val)
				bits = 8;
			else
				from += 8;	/* whole byte masked out */
		}
	} while (from < (int) frame_size);
	if (val) {
		/* Drop the unused bits of the last mask byte written. */
		bits = mask[len - 1];
		val <<= (from % 8);
		bits &= ~val;
		writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
			1);
	}

	/* CRC over the selected pattern bytes identifies the frame. */
	crc = ether_crc(to, data);
	writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}
  3129. /**
  3130. * hw_add_wol_arp - add ARP pattern
  3131. * @hw: The hardware instance.
  3132. * @ip_addr: The IPv4 address assigned to the device.
  3133. *
  3134. * This routine is used to add ARP pattern for waking up the host.
  3135. */
  3136. static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
  3137. {
  3138. u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
  3139. u8 pattern[42] = {
  3140. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  3141. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3142. 0x08, 0x06,
  3143. 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
  3144. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3145. 0x00, 0x00, 0x00, 0x00,
  3146. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3147. 0x00, 0x00, 0x00, 0x00 };
  3148. memcpy(&pattern[38], ip_addr, 4);
  3149. hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
  3150. }
/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add broadcast pattern for waking up the host.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
	/* Match the 6-byte all-ones broadcast destination address. */
	u8 mask[] = { 0x3F };
	u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
}
/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add multicast pattern for waking up the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by IPv6 ping command.  Note that multicast packets are filtered through the
 * multicast hash table, so not all multicast packets can wake up the host.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
	u8 mask[] = { 0x3F };
	u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };

	/* Fill in the last 3 bytes of the solicited-node style address
	 * from the device's own MAC address.
	 */
	memcpy(&pattern[3], &hw->override_addr[3], 3);
	hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}
  3180. /**
  3181. * hw_add_wol_ucast - add unicast pattern
  3182. * @hw: The hardware instance.
  3183. *
  3184. * This routine is used to add unicast pattern to wakeup the host.
  3185. *
  3186. * It is assumed the unicast packet is directed to the device, as the hardware
  3187. * can only receive them in normal case.
  3188. */
  3189. static void hw_add_wol_ucast(struct ksz_hw *hw)
  3190. {
  3191. u8 mask[] = { 0x3F };
  3192. hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
  3193. }
/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings.
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to enable Wake-on-LAN depending on driver settings.
 * Frame slots: 0 = unicast, 1 = multicast, 2 = broadcast, 3 = ARP.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}
  3213. /**
  3214. * hw_init - check driver is correct for the hardware
  3215. * @hw: The hardware instance.
  3216. *
  3217. * This function checks the hardware is correct for this driver and sets the
  3218. * hardware up for proper initialization.
  3219. *
  3220. * Return number of ports or 0 if not right.
  3221. */
  3222. static int hw_init(struct ksz_hw *hw)
  3223. {
  3224. int rc = 0;
  3225. u16 data;
  3226. u16 revision;
  3227. /* Set bus speed to 125MHz. */
  3228. writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
  3229. /* Check KSZ884x chip ID. */
  3230. data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
  3231. revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
  3232. data &= KS884X_CHIP_ID_MASK_41;
  3233. if (REG_CHIP_ID_41 == data)
  3234. rc = 1;
  3235. else if (REG_CHIP_ID_42 == data)
  3236. rc = 2;
  3237. else
  3238. return 0;
  3239. /* Setup hardware features or bug workarounds. */
  3240. if (revision <= 1) {
  3241. hw->features |= SMALL_PACKET_TX_BUG;
  3242. if (1 == rc)
  3243. hw->features |= HALF_DUPLEX_SIGNAL_BUG;
  3244. }
  3245. hw->features |= IPV6_CSUM_GEN_HACK;
  3246. return rc;
  3247. }
/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine resets the hardware.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine setup the hardware for proper operation.
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	/* Re-apply the receive modes saved in the hardware structure. */
	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine setup the interrupt mask for proper operation.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
/*
 * Validate the configured descriptor count.  The count must be a power
 * of two no smaller than 1 << MIN_DESC_SHIFT so the ring index can be
 * wrapped with a simple mask; an invalid count is rounded up.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT  2

	int alloc = info->alloc;
	int shift;

	/* Locate the lowest set bit. */
	shift = 0;
	while (!(alloc & 1)) {
		shift++;
		alloc >>= 1;
	}
	/* Not a power of two, or too few entries: round up. */
	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
		printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
		while (alloc) {
			shift++;
			alloc >>= 1;
		}
		if (shift < MIN_DESC_SHIFT)
			shift = MIN_DESC_SHIFT;
		alloc = 1 << shift;
		info->alloc = alloc;
	}
	/* Power-of-two count lets the index wrap with a mask. */
	info->mask = info->alloc - 1;
}
/*
 * Initialize a descriptor ring: link each hardware descriptor to the
 * physical address of the next one and close the ring.
 * NOTE(review): the @transmit parameter is not referenced here.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	/* Chain every descriptor to the physical address of its successor. */
	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: the last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	/* Reset the software bookkeeping. */
	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;
	desc_info->cur = desc_info->ring;
}
/**
 * hw_set_desc_base - set descriptor base addresses
 * @hw: The hardware instance.
 * @tx_addr: The transmit descriptor base.
 * @rx_addr: The receive descriptor base.
 *
 * This routine programs the descriptor base addresses after reset.
 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors. */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}
/* Reset a descriptor ring's software state; all descriptors become free. */
static void hw_reset_pkts(struct ksz_desc_info *info)
{
	info->cur = info->ring;
	info->avail = info->alloc;
	info->last = info->next = 0;
}
/* Kick the receive DMA engine to resume processing descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
/**
 * hw_start_rx - start receiving
 * @hw: The hardware instance.
 *
 * This routine starts the receive function of the hardware.
 */
static void hw_start_rx(struct ksz_hw *hw)
{
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
	/* Non-zero rx_stop means receiving was started deliberately. */
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}
/**
 * hw_stop_rx - stop receiving
 * @hw: The hardware instance.
 *
 * This routine stops the receive function of the hardware.
 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	/* Zero rx_stop marks the stop as intentional. */
	hw->rx_stop = 0;
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}
/**
 * hw_start_tx - start transmitting
 * @hw: The hardware instance.
 *
 * This routine starts the transmit function of the hardware.
 */
static void hw_start_tx(struct ksz_hw *hw)
{
	/* tx_cfg already contains DMA_TX_ENABLE from hw_setup. */
	writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_stop_tx - stop transmitting
 * @hw: The hardware instance.
 *
 * This routine stops the transmit function of the hardware.
 */
static void hw_stop_tx(struct ksz_hw *hw)
{
	writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_disable - disable hardware
 * @hw: The hardware instance.
 *
 * This routine disables the hardware.
 */
static void hw_disable(struct ksz_hw *hw)
{
	hw_stop_rx(hw);
	hw_stop_tx(hw);
	hw->enabled = 0;
}
/**
 * hw_enable - enable hardware
 * @hw: The hardware instance.
 *
 * This routine enables the hardware.
 */
static void hw_enable(struct ksz_hw *hw)
{
	hw_start_tx(hw);
	hw_start_rx(hw);
	hw->enabled = 1;
}
/**
 * hw_alloc_pkt - allocate enough descriptors for transmission
 * @hw: The hardware instance.
 * @length: The length of the packet.
 * @physical: Number of descriptors required.
 *
 * This function allocates descriptors for transmission.
 *
 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
 */
static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
{
	/* Always leave one descriptor free. */
	if (hw->tx_desc_info.avail <= 1)
		return 0;

	/* Allocate a descriptor for transmission and mark it current. */
	get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
	hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;

	/* Keep track of number of transmit descriptors used so far. */
	++hw->tx_int_cnt;
	hw->tx_size += length;

	/* Cannot hold on too much data; force a transmit interrupt. */
	if (hw->tx_size >= MAX_TX_HELD_SIZE)
		hw->tx_int_cnt = hw->tx_int_mask + 1;

	/* Not enough descriptors for all fragments: caller must copy. */
	if (physical > hw->tx_desc_info.avail)
		return 1;

	return hw->tx_desc_info.avail;
}
/**
 * hw_send_pkt - mark packet for transmission
 * @hw: The hardware instance.
 *
 * This routine marks the packet for transmission in PCI version.
 */
static void hw_send_pkt(struct ksz_hw *hw)
{
	struct ksz_desc *cur = hw->tx_desc_info.cur;

	cur->sw.buf.tx.last_seg = 1;

	/* Interrupt only after specified number of descriptors used. */
	if (hw->tx_int_cnt > hw->tx_int_mask) {
		cur->sw.buf.tx.intr = 1;
		hw->tx_int_cnt = 0;
		hw->tx_size = 0;
	}

	/* KSZ8842 supports port directed transmission. */
	cur->sw.buf.tx.dest_port = hw->dst_ports;

	/* Hand the descriptor to hardware and kick the transmit DMA. */
	release_desc(cur);

	writel(0, hw->io + KS_DMA_TX_START);
}
  3492. static int empty_addr(u8 *addr)
  3493. {
  3494. u32 *addr1 = (u32 *) addr;
  3495. u16 *addr2 = (u16 *) &addr[4];
  3496. return 0 == *addr1 && 0 == *addr2;
  3497. }
/**
 * hw_set_addr - set MAC address
 * @hw: The hardware instance.
 *
 * This routine programs the MAC address of the hardware when the address is
 * overridden.
 */
static void hw_set_addr(struct ksz_hw *hw)
{
	int i;

	/* MAC_ADDR_ORDER maps to the byte order the registers expect. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
			hw->io + KS884X_ADDR_0_OFFSET + i);

	sw_set_addr(hw, hw->override_addr);
}
/**
 * hw_read_addr - read MAC address
 * @hw: The hardware instance.
 *
 * This routine retrieves the MAC address of the hardware.  When no user
 * override is set and the hardware holds an all-zero address, a driver
 * default address (adjusted by the device id) is programmed instead.
 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < MAC_ADDR_LEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
		if (empty_addr(hw->override_addr)) {
			/* Fall back to the driver default; make it unique
			 * per device by adding the id to the last byte.
			 */
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
				MAC_ADDR_LEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
				MAC_ADDR_LEN);
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
  3537. static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
  3538. {
  3539. int i;
  3540. u32 mac_addr_lo;
  3541. u32 mac_addr_hi;
  3542. mac_addr_hi = 0;
  3543. for (i = 0; i < 2; i++) {
  3544. mac_addr_hi <<= 8;
  3545. mac_addr_hi |= mac_addr[i];
  3546. }
  3547. mac_addr_hi |= ADD_ADDR_ENABLE;
  3548. mac_addr_lo = 0;
  3549. for (i = 2; i < 6; i++) {
  3550. mac_addr_lo <<= 8;
  3551. mac_addr_lo |= mac_addr[i];
  3552. }
  3553. index *= ADD_ADDR_INCR;
  3554. writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
  3555. writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
  3556. }
/* Re-program all additional address entries; empty slots are disabled. */
static void hw_set_add_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
		if (empty_addr(hw->address[i]))
			writel(0, hw->io + ADD_ADDR_INCR * i +
				KS_ADD_ADDR_0_HI);
		else
			hw_ena_add_addr(hw, i, hw->address[i]);
	}
}
/*
 * Add a MAC address to the additional address table.
 * Return 0 if the address is already covered or was added; -1 if the
 * table is full.
 */
static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;
	int j = ADDITIONAL_ENTRIES;

	/* The main address already matches; nothing to add. */
	if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
		return 0;
	for (i = 0; i < hw->addr_list_size; i++) {
		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
			return 0;
		/* Remember the first empty slot. */
		if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
			j = i;
	}
	if (j < ADDITIONAL_ENTRIES) {
		memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
		hw_ena_add_addr(hw, j, hw->address[j]);
		return 0;
	}
	return -1;
}
/*
 * Remove a MAC address from the additional address table and disable
 * its hardware entry.  Return 0 on success; -1 if not found.
 */
static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < hw->addr_list_size; i++) {
		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
			memset(hw->address[i], 0, MAC_ADDR_LEN);
			writel(0, hw->io + ADD_ADDR_INCR * i +
				KS_ADD_ADDR_0_HI);
			return 0;
		}
	}
	return -1;
}
/**
 * hw_clr_multicast - clear multicast addresses
 * @hw: The hardware instance.
 *
 * This routine removes all multicast addresses set in the hardware.
 */
static void hw_clr_multicast(struct ksz_hw *hw)
{
	int i;

	/* Clear both the software copy and the hardware hash table. */
	for (i = 0; i < HW_MULTICAST_SIZE; i++) {
		hw->multi_bits[i] = 0;

		writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
	}
}
/**
 * hw_set_grp_addr - set multicast addresses
 * @hw: The hardware instance.
 *
 * This routine programs multicast addresses for the hardware to accept those
 * addresses.
 */
static void hw_set_grp_addr(struct ksz_hw *hw)
{
	int i;
	int index;
	int position;
	int value;

	memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);

	/* Hash each address to a bit position: top 6 bits of the CRC
	 * select one of the 64 filter bits.
	 */
	for (i = 0; i < hw->multi_list_size; i++) {
		position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		hw->multi_bits[index] |= (u8) value;
	}

	for (i = 0; i < HW_MULTICAST_SIZE; i++)
		writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
			i);
}
  3638. /**
  3639. * hw_set_multicast - enable or disable all multicast receiving
  3640. * @hw: The hardware instance.
  3641. * @multicast: To turn on or off the all multicast feature.
  3642. *
  3643. * This routine enables/disables the hardware to accept all multicast packets.
  3644. */
  3645. static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
  3646. {
  3647. /* Stop receiving for reconfiguration. */
  3648. hw_stop_rx(hw);
  3649. if (multicast)
  3650. hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
  3651. else
  3652. hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
  3653. if (hw->enabled)
  3654. hw_start_rx(hw);
  3655. }
  3656. /**
  3657. * hw_set_promiscuous - enable or disable promiscuous receiving
  3658. * @hw: The hardware instance.
  3659. * @prom: To turn on or off the promiscuous feature.
  3660. *
  3661. * This routine enables/disables the hardware to accept all packets.
  3662. */
  3663. static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
  3664. {
  3665. /* Stop receiving for reconfiguration. */
  3666. hw_stop_rx(hw);
  3667. if (prom)
  3668. hw->rx_cfg |= DMA_RX_PROMISCUOUS;
  3669. else
  3670. hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
  3671. if (hw->enabled)
  3672. hw_start_rx(hw);
  3673. }
  3674. /**
  3675. * sw_enable - enable the switch
  3676. * @hw: The hardware instance.
  3677. * @enable: The flag to enable or disable the switch
  3678. *
  3679. * This routine is used to enable/disable the switch in KSZ8842.
  3680. */
  3681. static void sw_enable(struct ksz_hw *hw, int enable)
  3682. {
  3683. int port;
  3684. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  3685. if (hw->dev_count > 1) {
  3686. /* Set port-base vlan membership with host port. */
  3687. sw_cfg_port_base_vlan(hw, port,
  3688. HOST_MASK | (1 << port));
  3689. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  3690. } else {
  3691. sw_cfg_port_base_vlan(hw, port, PORT_MASK);
  3692. port_set_stp_state(hw, port, STP_STATE_FORWARDING);
  3693. }
  3694. }
  3695. if (hw->dev_count > 1)
  3696. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  3697. else
  3698. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
  3699. if (enable)
  3700. enable = KS8842_START;
  3701. writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
  3702. }
/**
 * sw_setup - setup the switch
 * @hw: The hardware instance.
 *
 * This routine setup the hardware switch engine for default operation.
 */
static void sw_setup(struct ksz_hw *hw)
{
	int port;

	sw_set_global_ctrl(hw);

	/* Enable switch broadcast storm protection at 10% percent rate. */
	sw_init_broad_storm(hw);
	hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
	for (port = 0; port < SWITCH_PORT_NUM; port++)
		sw_ena_broad_storm(hw, port);

	/* Load default priority, mirroring, rate limit and VLAN setup. */
	sw_init_prio(hw);
	sw_init_mirror(hw);
	sw_init_prio_rate(hw);
	sw_init_vlan(hw);

	if (hw->features & STP_SUPPORT)
		sw_init_stp(hw);
	/* NOTE(review): presumably sw_chk() tests whether both switch flow
	 * control bits are set; if not, rely on MAC pause flow control --
	 * confirm against sw_chk()'s definition.
	 */
	if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
			SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
		hw->overrides |= PAUSE_FLOW_CTRL;
	sw_enable(hw, 1);
}
/**
 * ksz_start_timer - start kernel timer
 * @info: Kernel timer information.
 * @time: The time tick.
 *
 * This routine starts the kernel timer after the specified time tick.
 */
static void ksz_start_timer(struct ksz_timer_info *info, int time)
{
	info->cnt = 0;
	info->timer.expires = jiffies + time;
	add_timer(&info->timer);

	/* infinity -- ksz_update_timer() rearms forever while max is -1. */
	info->max = -1;
}
  3744. /**
  3745. * ksz_stop_timer - stop kernel timer
  3746. * @info: Kernel timer information.
  3747. *
  3748. * This routine stops the kernel timer.
  3749. */
  3750. static void ksz_stop_timer(struct ksz_timer_info *info)
  3751. {
  3752. if (info->max) {
  3753. info->max = 0;
  3754. del_timer_sync(&info->timer);
  3755. }
  3756. }
/* Initialize the timer wrapper: no run limit yet (max = 0), remember the
 * rearm period, and hook up the callback and its cookie.
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(unsigned long), void *data)
{
	info->max = 0;
	info->period = period;
	init_timer(&info->timer);
	info->timer.function = function;
	info->timer.data = (unsigned long) data;
}
  3766. static void ksz_update_timer(struct ksz_timer_info *info)
  3767. {
  3768. ++info->cnt;
  3769. if (info->max > 0) {
  3770. if (info->cnt < info->max) {
  3771. info->timer.expires = jiffies + info->period;
  3772. add_timer(&info->timer);
  3773. } else
  3774. info->max = 0;
  3775. } else if (info->max < 0) {
  3776. info->timer.expires = jiffies + info->period;
  3777. add_timer(&info->timer);
  3778. }
  3779. }
  3780. /**
  3781. * ksz_alloc_soft_desc - allocate software descriptors
  3782. * @desc_info: Descriptor information structure.
  3783. * @transmit: Indication that descriptors are for transmit.
  3784. *
  3785. * This local function allocates software descriptors for manipulation in
  3786. * memory.
  3787. *
  3788. * Return 0 if successful.
  3789. */
  3790. static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
  3791. {
  3792. desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
  3793. GFP_KERNEL);
  3794. if (!desc_info->ring)
  3795. return 1;
  3796. memset((void *) desc_info->ring, 0,
  3797. sizeof(struct ksz_desc) * desc_info->alloc);
  3798. hw_init_desc(desc_info, transmit);
  3799. return 0;
  3800. }
/**
 * ksz_alloc_desc - allocate hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local function allocates hardware descriptors for receiving and
 * transmitting.
 *
 * Return 0 if successful, 1 on allocation failure.
 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/* Allocate memory for RX & TX descriptors.  Over-allocate by
	 * DESC_ALIGNMENT so the ring base can be aligned below.
	 */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		pci_alloc_consistent(
			adapter->pdev, adapter->desc_pool.alloc_size,
			&adapter->desc_pool.dma_addr);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}
	memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);

	/* Align to the next cache line boundary. */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/* Allocate receive/transmit descriptors.  The rx ring sits at the
	 * start of the pool; the tx ring follows directly after it.
	 */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	/* NOTE(review): on soft-descriptor failure the consistent memory is
	 * not freed here; presumably the caller cleans up via ksz_free_desc()
	 * -- confirm against the callers.
	 */
	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
/**
 * free_dma_buf - release DMA buffer resources
 * @adapter: Adapter information structure.
 * @dma_buf: The DMA buffer holding the mapping and the socket buffer.
 * @direction: The PCI DMA direction the buffer was mapped with.
 *
 * This routine is just a helper function to release the DMA buffer resources.
 */
static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
	int direction)
{
	/* Unmap before freeing the skb the mapping points into. */
	pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
	dev_kfree_skb(dma_buf->skb);
	/* Clear both fields so the buffer reads as unused. */
	dma_buf->skb = NULL;
	dma_buf->dma = 0;
}
/**
 * ksz_init_rx_buffers - initialize receive descriptors
 * @adapter: Adapter information structure.
 *
 * This routine initializes DMA buffers for receiving.
 */
static void ksz_init_rx_buffers(struct dev_info *adapter)
{
	int i;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct ksz_hw *hw = &adapter->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;

	for (i = 0; i < hw->rx_desc_info.alloc; i++) {
		get_rx_pkt(info, &desc);

		dma_buf = DMA_BUFFER(desc);
		/* Re-allocate the buffer when the MTU has changed. */
		if (dma_buf->skb && dma_buf->len != adapter->mtu)
			free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
		dma_buf->len = adapter->mtu;
		if (!dma_buf->skb)
			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
		/* NOTE(review): an alloc_skb() failure is not handled here;
		 * the descriptor is then released below with a zero/stale
		 * DMA address -- confirm this is tolerated by the hardware.
		 */
		if (dma_buf->skb && !dma_buf->dma) {
			dma_buf->skb->dev = adapter->dev;
			dma_buf->dma = pci_map_single(
				adapter->pdev,
				skb_tail_pointer(dma_buf->skb),
				dma_buf->len,
				PCI_DMA_FROMDEVICE);
		}

		/* Set descriptor. */
		set_rx_buf(desc, dma_buf->dma);
		set_rx_len(desc, dma_buf->len);
		release_desc(desc);
	}
}
/**
 * ksz_alloc_mem - allocate memory for hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This function allocates memory for use by hardware descriptors for receiving
 * and transmitting.
 *
 * Return 0 if successful, 1 on allocation failure.
 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/* Determine how many descriptors to skip transmit interrupt. */
	hw->tx_int_cnt = 0;
	/* Interrupt every quarter of the ring, capped at 8 descriptors. */
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	/* Count the bit width of the interval... */
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	/* ...then turn it into a power-of-two mask usable for modulo. */
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size, rounded up to DESC_ALIGNMENT. */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		printk(KERN_ALERT
			"Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
/**
 * ksz_free_desc - free software and hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees the software and hardware descriptors allocated by
 * ksz_alloc_desc().
 */
static void ksz_free_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Reset descriptor ring locations first. */
	hw->rx_desc_info.ring_virt = NULL;
	hw->tx_desc_info.ring_virt = NULL;
	hw->rx_desc_info.ring_phys = 0;
	hw->tx_desc_info.ring_phys = 0;

	/* Free memory. */
	if (adapter->desc_pool.alloc_virt)
		pci_free_consistent(
			adapter->pdev,
			adapter->desc_pool.alloc_size,
			adapter->desc_pool.alloc_virt,
			adapter->desc_pool.dma_addr);

	/* Reset resource pool. */
	adapter->desc_pool.alloc_size = 0;
	adapter->desc_pool.alloc_virt = NULL;

	/* Free the software descriptor rings (kfree(NULL) is a no-op). */
	kfree(hw->rx_desc_info.ring);
	hw->rx_desc_info.ring = NULL;
	kfree(hw->tx_desc_info.ring);
	hw->tx_desc_info.ring = NULL;
}
  3972. /**
  3973. * ksz_free_buffers - free buffers used in the descriptors
  3974. * @adapter: Adapter information structure.
  3975. * @desc_info: Descriptor information structure.
  3976. *
  3977. * This local routine frees buffers used in the DMA buffers.
  3978. */
  3979. static void ksz_free_buffers(struct dev_info *adapter,
  3980. struct ksz_desc_info *desc_info, int direction)
  3981. {
  3982. int i;
  3983. struct ksz_dma_buf *dma_buf;
  3984. struct ksz_desc *desc = desc_info->ring;
  3985. for (i = 0; i < desc_info->alloc; i++) {
  3986. dma_buf = DMA_BUFFER(desc);
  3987. if (dma_buf->skb)
  3988. free_dma_buf(adapter, dma_buf, direction);
  3989. desc++;
  3990. }
  3991. }
/**
 * ksz_free_mem - free all resources used by descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees all the resources allocated by ksz_alloc_mem().
 * Buffers must be released before the descriptor rings they live in.
 */
static void ksz_free_mem(struct dev_info *adapter)
{
	/* Free transmit buffers. */
	ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
		PCI_DMA_TODEVICE);

	/* Free receive buffers. */
	ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
		PCI_DMA_FROMDEVICE);

	/* Free descriptors. */
	ksz_free_desc(adapter);
}
  4009. static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
  4010. u64 *counter)
  4011. {
  4012. int i;
  4013. int mib;
  4014. int port;
  4015. struct ksz_port_mib *port_mib;
  4016. memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  4017. for (i = 0, port = first; i < cnt; i++, port++) {
  4018. port_mib = &hw->port_mib[port];
  4019. for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
  4020. counter[mib] += port_mib->counter[mib];
  4021. }
  4022. }
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine is used to send a packet out to the network.  The caller
 * must have reserved enough descriptors via hw_alloc_pkt() beforehand.
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		/* Scatter-gather: one descriptor for the linear part, then
		 * one per page fragment.
		 */
		int frag;
		skb_frag_t *this_frag;

		dma_buf->len = skb->len - skb->data_len;

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = this_frag->size;

			dma_buf->dma = pci_map_single(
				hw_priv->pdev,
				page_address(this_frag->page) +
				this_frag->page_offset,
				dma_buf->len,
				PCI_DMA_TODEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/* Release the first descriptor last, so the hardware only
		 * sees the chain once it is fully built.
		 */
		release_desc(first);
	} else {
		/* Linear skb: single descriptor. */
		dma_buf->len = len;

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	/* Ask the hardware to generate TCP/UDP checksums for this frame. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += len;
}
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device information.
 * @normal: Non-zero to stop at the first descriptor still owned by the
 *	hardware; zero to forcibly reclaim every descriptor (reset path).
 *
 * This routine is called to clean up the transmitted buffers.
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	spin_lock(&hw_priv->hwlock);
	last = info->last;

	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;
			else
				/* Forced cleanup: take it back from hw. */
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		pci_unmap_single(
			hw_priv->pdev, dma_buf->dma, dma_buf->len,
			PCI_DMA_TODEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		dev->trans_start = jiffies;
}
/**
 * tx_done - transmit done processing
 * @hw_priv: Network device information.
 *
 * This routine is called when the transmit interrupt is triggered, indicating
 * either a packet is sent successfully or there are transmit errors.
 * After reclaiming descriptors it wakes any queue that was stopped for
 * lack of descriptors.
 */
static void tx_done(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	transmit_cleanup(hw_priv, 1);

	for (port = 0; port < hw->dev_count; port++) {
		struct net_device *dev = hw->port_info[port].pdev;

		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}
  4178. static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
  4179. {
  4180. skb->dev = old->dev;
  4181. skb->protocol = old->protocol;
  4182. skb->ip_summed = old->ip_summed;
  4183. skb->csum = old->csum;
  4184. skb_set_network_header(skb, ETH_HLEN);
  4185. dev_kfree_skb(old);
  4186. }
  4187. /**
  4188. * netdev_tx - send out packet
  4189. * @skb: Socket buffer.
  4190. * @dev: Network device.
  4191. *
  4192. * This function is used by the upper network layer to send out a packet.
  4193. *
  4194. * Return 0 if successful; otherwise an error code indicating failure.
  4195. */
  4196. static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
  4197. {
  4198. struct dev_priv *priv = netdev_priv(dev);
  4199. struct dev_info *hw_priv = priv->adapter;
  4200. struct ksz_hw *hw = &hw_priv->hw;
  4201. int left;
  4202. int num = 1;
  4203. int rc = 0;
  4204. if (hw->features & SMALL_PACKET_TX_BUG) {
  4205. struct sk_buff *org_skb = skb;
  4206. if (skb->len <= 48) {
  4207. if (skb_end_pointer(skb) - skb->data >= 50) {
  4208. memset(&skb->data[skb->len], 0, 50 - skb->len);
  4209. skb->len = 50;
  4210. } else {
  4211. skb = dev_alloc_skb(50);
  4212. if (!skb)
  4213. return NETDEV_TX_BUSY;
  4214. memcpy(skb->data, org_skb->data, org_skb->len);
  4215. memset(&skb->data[org_skb->len], 0,
  4216. 50 - org_skb->len);
  4217. skb->len = 50;
  4218. copy_old_skb(org_skb, skb);
  4219. }
  4220. }
  4221. }
  4222. spin_lock_irq(&hw_priv->hwlock);
  4223. num = skb_shinfo(skb)->nr_frags + 1;
  4224. left = hw_alloc_pkt(hw, skb->len, num);
  4225. if (left) {
  4226. if (left < num ||
  4227. ((hw->features & IPV6_CSUM_GEN_HACK) &&
  4228. (CHECKSUM_PARTIAL == skb->ip_summed) &&
  4229. (ETH_P_IPV6 == htons(skb->protocol)))) {
  4230. struct sk_buff *org_skb = skb;
  4231. skb = dev_alloc_skb(org_skb->len);
  4232. if (!skb)
  4233. return NETDEV_TX_BUSY;
  4234. skb_copy_and_csum_dev(org_skb, skb->data);
  4235. org_skb->ip_summed = 0;
  4236. skb->len = org_skb->len;
  4237. copy_old_skb(org_skb, skb);
  4238. }
  4239. send_packet(skb, dev);
  4240. if (left <= num)
  4241. netif_stop_queue(dev);
  4242. } else {
  4243. /* Stop the transmit queue until packet is allocated. */
  4244. netif_stop_queue(dev);
  4245. rc = NETDEV_TX_BUSY;
  4246. }
  4247. spin_unlock_irq(&hw_priv->hwlock);
  4248. return rc;
  4249. }
/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev)
{
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (jiffies - last_reset <= dev->watchdog_timeo)
			hw_priv = NULL;
	}

	last_reset = jiffies;
	if (hw_priv) {
		/* Full reset: quiesce, drop in-flight packets, rebuild. */
		hw_dis_intr(hw);
		hw_disable(hw);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		/* Re-program everything the reset wiped out. */
		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
/* Mark the skb's checksum as verified by hardware when it carries a TCP
 * segment in an IPv4 packet (possibly behind one VLAN tag).
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* The network header currently points at the VLAN header;
		 * the 16 bits at the iphdr tot_len offset alias the VLAN
		 * encapsulated-protocol field.  Read it, then skip the VLAN
		 * header to reach the real IP header.
		 */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		/* Caller only invokes this when hardware TCP/UDP checksum
		 * checking is enabled (see rx_proc), so trust the result.
		 */
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
/* Copy one received frame out of its DMA buffer into a fresh skb and hand
 * it to the network stack.  Returns 0 on success, -ENOMEM when no skb
 * could be allocated (the frame is dropped and counted).
 */
static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
	struct ksz_desc *desc, union desc_stat status)
{
	int packet_len;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_dma_buf *dma_buf;
	struct sk_buff *skb;
	int rx_status;

	/* Received length includes 4-byte CRC. */
	packet_len = status.rx.frame_len - 4;

	dma_buf = DMA_BUFFER(desc);
	pci_dma_sync_single_for_cpu(
		hw_priv->pdev, dma_buf->dma, packet_len + 4,
		PCI_DMA_FROMDEVICE);

	do {
		/* skb->data != skb->head */
		skb = dev_alloc_skb(packet_len + 2);
		if (!skb) {
			priv->stats.rx_dropped++;
			return -ENOMEM;
		}

		/*
		 * Align socket buffer in 4-byte boundary for better
		 * performance.
		 */
		skb_reserve(skb, 2);

		/* Copy out of the DMA buffer so it can be reused as-is. */
		memcpy(skb_put(skb, packet_len),
			dma_buf->skb->data, packet_len);
	} while (0);
	skb->dev = dev;

	skb->protocol = eth_type_trans(skb, dev);

	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
		csum_verified(skb);

	/* Update receive statistics. */
	priv->stats.rx_packets++;
	priv->stats.rx_bytes += packet_len;

	/* Notify upper layer for received packet. */
	dev->last_rx = jiffies;

	/* netif_rx() result is recorded but deliberately not acted upon. */
	rx_status = netif_rx(skb);

	return 0;
}
/* Receive-ring walker for the single-device case: deliver every complete
 * frame that the hardware has handed back, stopping at the first
 * descriptor still owned by hardware.  Returns the number of frames
 * delivered.
 */
static int dev_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		/* Always hand the descriptor back to the hardware. */
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
/* Receive-ring walker for the multi-device (one netdev per switch port)
 * case: route each frame to the netdev of the port it arrived on and drop
 * frames for ports that are down.  Returns the number of frames delivered.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		/* Always hand the descriptor back to the hardware. */
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
/* Receive-ring walker used when receive errors are reported in the
 * descriptor status: like port_rcv_packets() but additionally accepts
 * frames whose only error is "too long" and counts other errored frames.
 * Returns the number of frames delivered.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		/* Always hand the descriptor back to the hardware. */
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
  4488. static void rx_proc_task(unsigned long data)
  4489. {
  4490. struct dev_info *hw_priv = (struct dev_info *) data;
  4491. struct ksz_hw *hw = &hw_priv->hw;
  4492. if (!hw->enabled)
  4493. return;
  4494. if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
  4495. /* In case receive process is suspended because of overrun. */
  4496. hw_resume_rx(hw);
  4497. /* tasklets are interruptible. */
  4498. spin_lock_irq(&hw_priv->hwlock);
  4499. hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
  4500. spin_unlock_irq(&hw_priv->hwlock);
  4501. } else {
  4502. hw_ack_intr(hw, KS884X_INT_RX);
  4503. tasklet_schedule(&hw_priv->rx_tasklet);
  4504. }
  4505. }
/* Transmit tasklet: reclaim finished descriptors, then re-enable the
 * transmit interrupt that netdev_intr() masked before scheduling us.
 */
static void tx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
/* Handle the "receive stopped" interrupt using the rx_stop counter as a
 * small handshake.  NOTE(review): semantics inferred from this code --
 * 0 = stop not requested, 1 = first stop seen, >1 = restart wanted;
 * confirm against the hw_stop_rx()/hw_start_rx() implementations.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		/* Unsolicited stop: stop listening for this interrupt. */
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			hw_start_rx(hw);
		} else {
			/* Receiver should stay off; quiet the interrupt. */
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
/**
 * netdev_intr - interrupt handling
 * @irq: Interrupt number.
 * @dev_id: Network device.
 *
 * This hardware interrupt handler acknowledges the pending sources and
 * defers receive/transmit processing to tasklets.
 *
 * Return IRQ_HANDLED if interrupt is handled, IRQ_NONE when the device
 * raised no interrupt.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt! */
	if (!int_enable)
		return IRQ_NONE;

	/* do { } while (0) lets "break" skip the remaining checks. */
	do {
		hw_ack_intr(hw, int_enable);

		/* Only act on sources that are currently enabled. */
		int_enable &= hw->intr_mask;

		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			/* Mask tx interrupts until the tasklet has run. */
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			/* Mask rx interrupts until the tasklet has run. */
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			priv->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			printk(KERN_INFO "Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				printk(KERN_INFO "Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	return IRQ_HANDLED;
}
  4590. /*
  4591. * Linux network device functions
  4592. */
  4593. static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * netdev_netpoll - netpoll entry point
 * @dev:	Network device.
 *
 * Polling path used when normal interrupt delivery is unavailable
 * (e.g. netconsole): disables hardware interrupts and invokes the
 * interrupt handler directly.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
  4603. static void bridge_change(struct ksz_hw *hw)
  4604. {
  4605. int port;
  4606. u8 member;
  4607. struct ksz_switch *sw = hw->ksz_switch;
  4608. /* No ports in forwarding state. */
  4609. if (!sw->member) {
  4610. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  4611. sw_block_addr(hw);
  4612. }
  4613. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  4614. if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
  4615. member = HOST_MASK | sw->member;
  4616. else
  4617. member = HOST_MASK | (1 << port);
  4618. if (member != sw->port_cfg[port].member)
  4619. sw_cfg_port_base_vlan(hw, port, member);
  4620. }
  4621. }
/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function process the close operation of network device. This is caused
 * by the user command "ifconfig ethX down."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed. Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}

	/* Extra MAC addresses are registered only for secondary ports. */
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);

	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	/* Drop this interface's contribution to the shared counts. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	hw_priv->opened--;
	/* The last close shuts the whole hardware down. */
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_disable(&hw_priv->rx_tasklet);
		tasklet_disable(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}
/**
 * hw_cfg_huge_frame - configure huge frame reception
 * @hw_priv:	Network device information.
 * @hw:		The hardware instance.
 *
 * Propagates the RX_HUGE_FRAME feature flag to the switch control
 * register (when a switch is present), the receive DMA configuration,
 * and the receive handler function pointer.
 */
static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
{
	if (hw->ksz_switch) {
		u32 data;

		data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
		if (hw->features & RX_HUGE_FRAME)
			data |= SWITCH_HUGE_PACKET;
		else
			data &= ~SWITCH_HUGE_PACKET;
		writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	}
	if (hw->features & RX_HUGE_FRAME) {
		/* Huge frames need error status reporting and the special
		 * receive handler -- presumably to cope with frames larger
		 * than a regular buffer; confirm against dev_rcv_special.
		 */
		hw->rx_cfg |= DMA_RX_ERROR;
		hw_priv->dev_rcv = dev_rcv_special;
	} else {
		hw->rx_cfg &= ~DMA_RX_ERROR;
		if (hw->dev_count > 1)
			hw_priv->dev_rcv = port_rcv_packets;
		else
			hw_priv->dev_rcv = dev_rcv_packets;
	}
}
/**
 * prepare_hardware - prepare hardware for the first open
 * @dev:	Network device.
 *
 * Requests the (shared) interrupt line, enables the tasklets, resets
 * the chip, and programs descriptor ring bases, MAC address, and frame
 * size configuration, finishing with fresh receive buffers.
 *
 * Return 0 if successful; otherwise the error code from request_irq().
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_enable(&hw_priv->rx_tasklet);
	tasklet_enable(&hw_priv->tx_tasklet);

	/* Fresh hardware state: no promiscuous/all-multi references yet. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
/**
 * netdev_open - open network device
 * @dev: Network device.
 *
 * This function process the open operation of network device. This is caused
 * by the user command "ifconfig ethX up."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int i;
	int p;
	int rc = 0;

	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&priv->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	/* The first open of any interface brings the hardware up. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		for (i = 0; i < hw->mib_port_cnt; i++) {
			/* Stagger the per-port MIB counter reads so they
			 * do not all fire at the same time.
			 */
			if (next_jiffies < jiffies)
				next_jiffies = jiffies + HZ * 2;
			else
				next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		/* The host port of a switch is always connected. */
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	/* First open also enables the hardware and its interrupts. */
	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	if (media_connected == priv->media_state)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	if (netif_msg_link(priv))
		printk(KERN_INFO "%s link %s\n", dev->name,
			(media_connected == priv->media_state ?
			"on" : "off"));

	netif_start_queue(dev);

	return 0;
}
  4815. /* RX errors = rx_errors */
  4816. /* RX dropped = rx_dropped */
  4817. /* RX overruns = rx_fifo_errors */
  4818. /* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
  4819. /* TX errors = tx_errors */
  4820. /* TX dropped = tx_dropped */
  4821. /* TX overruns = tx_fifo_errors */
  4822. /* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
  4823. /* collisions = collisions */
/**
 * netdev_query_statistics - query network device statistics
 * @dev: Network device.
 *
 * This function returns the statistics of the network device. The device
 * needs not be opened.
 *
 * Return network device statistics.
 */
static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &priv->adapter->hw;
	struct ksz_port_mib *mib;
	int i;
	int p;

	priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
	priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];

	/* Reset to zero to add count later. */
	priv->stats.multicast = 0;
	priv->stats.collisions = 0;
	priv->stats.rx_length_errors = 0;
	priv->stats.rx_crc_errors = 0;
	priv->stats.rx_frame_errors = 0;
	priv->stats.tx_window_errors = 0;

	/* Sum the hardware MIB counters of every port of this interface. */
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		mib = &hw->port_mib[p];

		priv->stats.multicast += (unsigned long)
			mib->counter[MIB_COUNTER_RX_MULTICAST];

		priv->stats.collisions += (unsigned long)
			mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];

		priv->stats.rx_length_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
			mib->counter[MIB_COUNTER_RX_FRAGMENT] +
			mib->counter[MIB_COUNTER_RX_OVERSIZE] +
			mib->counter[MIB_COUNTER_RX_JABBER]);
		priv->stats.rx_crc_errors += (unsigned long)
			mib->counter[MIB_COUNTER_RX_CRC_ERR];
		priv->stats.rx_frame_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
			mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
		priv->stats.tx_window_errors += (unsigned long)
			mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
	}

	return &priv->stats;
}
  4871. /**
  4872. * netdev_set_mac_address - set network device MAC address
  4873. * @dev: Network device.
  4874. * @addr: Buffer of MAC address.
  4875. *
  4876. * This function is used to set the MAC address of the network device.
  4877. *
  4878. * Return 0 to indicate success.
  4879. */
  4880. static int netdev_set_mac_address(struct net_device *dev, void *addr)
  4881. {
  4882. struct dev_priv *priv = netdev_priv(dev);
  4883. struct dev_info *hw_priv = priv->adapter;
  4884. struct ksz_hw *hw = &hw_priv->hw;
  4885. struct sockaddr *mac = addr;
  4886. uint interrupt;
  4887. if (priv->port.first_port > 0)
  4888. hw_del_addr(hw, dev->dev_addr);
  4889. else {
  4890. hw->mac_override = 1;
  4891. memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
  4892. }
  4893. memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
  4894. interrupt = hw_block_intr(hw);
  4895. if (priv->port.first_port > 0)
  4896. hw_add_addr(hw, dev->dev_addr);
  4897. else
  4898. hw_set_addr(hw);
  4899. hw_restore_intr(hw, interrupt);
  4900. return 0;
  4901. }
/**
 * dev_set_promiscuous - change promiscuous mode reference count
 * @dev:	Network device.
 * @priv:	Device private data.
 * @hw:		The hardware instance.
 * @promiscuous:	Non-zero to enable promiscuous mode for this device.
 *
 * Keeps a per-hardware count of interfaces wanting promiscuous mode and
 * reprograms the hardware only when the count crosses the 0/1 boundary.
 * Leaving promiscuous mode while STP is supported also releases the port
 * from the bridge.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
				dev->br_port) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
  4932. static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
  4933. int multicast)
  4934. {
  4935. if (multicast != priv->multicast) {
  4936. u8 all_multi = hw->all_multi;
  4937. if (multicast)
  4938. ++hw->all_multi;
  4939. else
  4940. --hw->all_multi;
  4941. priv->multicast = multicast;
  4942. /* Turn on/off all multicast mode. */
  4943. if (hw->all_multi <= 1 && all_multi <= 1)
  4944. hw_set_multicast(hw, hw->all_multi);
  4945. }
  4946. }
/**
 * netdev_set_rx_mode
 * @dev: Network device.
 *
 * This routine is used to set multicast addresses or put the network device
 * into promiscuous mode.
 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct dev_mc_list *mc_ptr;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	/* Multiple interfaces share one hardware filter, so any multicast
	 * interest is promoted to all-multicast mode.
	 */
	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (dev->mc_count > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		netdev_for_each_mc_addr(mc_ptr, dev) {
			/* Skip addresses without the group (multicast) bit. */
			if (!(*mc_ptr->dmi_addr & 1))
				continue;
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
				MAC_ADDR_LEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leave all-multicast mode if it had been forced on by an
		 * oversized list earlier.
		 */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
/**
 * netdev_change_mtu - change the maximum transfer unit
 * @dev: Network device.
 * @new_mtu: Requested MTU, not counting the Ethernet header.
 *
 * Validates the new MTU, derives the hardware receive buffer size, and
 * turns the huge-frame feature on or off accordingly.  The interface
 * must be down.
 *
 * Return 0 if successful; otherwise -EBUSY or -EINVAL.
 */
static int netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int hw_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* Cannot use different MTU in multiple device interfaces mode. */
	if (hw->dev_count > 1)
		if (dev != hw_priv->dev)
			return 0;
	if (new_mtu < 60)
		return -EINVAL;

	if (dev->mtu != new_mtu) {
		/* Header plus 4 extra bytes -- presumably the VLAN tag;
		 * confirm against the frame size setup code.
		 */
		hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
		if (hw_mtu > MAX_RX_BUF_SIZE)
			return -EINVAL;
		if (hw_mtu > REGULAR_RX_BUF_SIZE) {
			hw->features |= RX_HUGE_FRAME;
			hw_mtu = MAX_RX_BUF_SIZE;
		} else {
			hw->features &= ~RX_HUGE_FRAME;
			hw_mtu = REGULAR_RX_BUF_SIZE;
		}
		/* Round buffer size up to a multiple of 4. */
		hw_mtu = (hw_mtu + 3) & ~3;
		hw_priv->mtu = hw_mtu;
		dev->mtu = new_mtu;
	}
	return 0;
}
  5029. /**
  5030. * netdev_ioctl - I/O control processing
  5031. * @dev: Network device.
  5032. * @ifr: Interface request structure.
  5033. * @cmd: I/O control code.
  5034. *
  5035. * This function is used to process I/O control calls.
  5036. *
  5037. * Return 0 to indicate success.
  5038. */
  5039. static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  5040. {
  5041. struct dev_priv *priv = netdev_priv(dev);
  5042. struct dev_info *hw_priv = priv->adapter;
  5043. struct ksz_hw *hw = &hw_priv->hw;
  5044. struct ksz_port *port = &priv->port;
  5045. int rc;
  5046. int result = 0;
  5047. struct mii_ioctl_data *data = if_mii(ifr);
  5048. if (down_interruptible(&priv->proc_sem))
  5049. return -ERESTARTSYS;
  5050. /* assume success */
  5051. rc = 0;
  5052. switch (cmd) {
  5053. /* Get address of MII PHY in use. */
  5054. case SIOCGMIIPHY:
  5055. data->phy_id = priv->id;
  5056. /* Fallthrough... */
  5057. /* Read MII PHY register. */
  5058. case SIOCGMIIREG:
  5059. if (data->phy_id != priv->id || data->reg_num >= 6)
  5060. result = -EIO;
  5061. else
  5062. hw_r_phy(hw, port->linked->port_id, data->reg_num,
  5063. &data->val_out);
  5064. break;
  5065. /* Write MII PHY register. */
  5066. case SIOCSMIIREG:
  5067. if (!capable(CAP_NET_ADMIN))
  5068. result = -EPERM;
  5069. else if (data->phy_id != priv->id || data->reg_num >= 6)
  5070. result = -EIO;
  5071. else
  5072. hw_w_phy(hw, port->linked->port_id, data->reg_num,
  5073. data->val_in);
  5074. break;
  5075. default:
  5076. result = -EOPNOTSUPP;
  5077. }
  5078. up(&priv->proc_sem);
  5079. return result;
  5080. }
  5081. /*
  5082. * MII support
  5083. */
/**
 * mdio_read - read PHY register
 * @dev: Network device.
 * @phy_id: The PHY id.  Ignored; the currently linked port is read.
 * @reg_num: The register number.
 *
 * This function returns the PHY register value.
 *
 * Return the register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	u16 val_out;

	/* PHY registers are two bytes apart, hence the shift. */
	hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
	return val_out;
}
  5103. /**
  5104. * mdio_write - set PHY register
  5105. * @dev: Network device.
  5106. * @phy_id: The PHY id.
  5107. * @reg_num: The register number.
  5108. * @val: The register value.
  5109. *
  5110. * This procedure sets the PHY register value.
  5111. */
  5112. static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
  5113. {
  5114. struct dev_priv *priv = netdev_priv(dev);
  5115. struct ksz_port *port = &priv->port;
  5116. struct ksz_hw *hw = port->hw;
  5117. int i;
  5118. int pi;
  5119. for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
  5120. hw_w_phy(hw, pi, reg_num << 1, val);
  5121. }
  5122. /*
  5123. * ethtool support
  5124. */
  5125. #define EEPROM_SIZE 0x40
  5126. static u16 eeprom_data[EEPROM_SIZE] = { 0 };
  5127. #define ADVERTISED_ALL \
  5128. (ADVERTISED_10baseT_Half | \
  5129. ADVERTISED_10baseT_Full | \
  5130. ADVERTISED_100baseT_Half | \
  5131. ADVERTISED_100baseT_Full)
  5132. /* These functions use the MII functions in mii.c. */
/**
 * netdev_get_settings - get network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function queries the PHY and returns its state in the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_gset(&priv->mii_if, cmd);
	/* NOTE(review): a SUPPORTED_* flag is OR-ed into the advertising
	 * mask; presumably it relies on SUPPORTED_TP and ADVERTISED_TP
	 * having the same bit value -- confirm.
	 */
	cmd->advertising |= SUPPORTED_TP;
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for workaround in next function. */
	priv->advertising = cmd->advertising;
	return 0;
}
/**
 * netdev_set_settings - set network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function sets the PHY according to the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	int rc;

	/*
	 * ethtool utility does not change advertised setting if auto
	 * negotiation is not specified explicitly.
	 */
	if (cmd->autoneg && priv->advertising == cmd->advertising) {
		/* Start from all modes, then strip the ones ruled out by
		 * the requested speed and duplex.
		 */
		cmd->advertising |= ADVERTISED_ALL;
		if (10 == cmd->speed)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half);
		else if (100 == cmd->speed)
			cmd->advertising &=
				~(ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half);
		if (0 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Full);
		else if (1 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Half);
	}
	mutex_lock(&hw_priv->lock);
	if (cmd->autoneg &&
			(cmd->advertising & ADVERTISED_ALL) ==
			ADVERTISED_ALL) {
		/* Full auto-negotiation: let the PHY decide everything. */
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		/* Internal duplex encoding is offset by one -- presumably
		 * 1 = half, 2 = full; confirm against the port code.
		 */
		port->duplex = cmd->duplex + 1;
		if (cmd->speed != 1000)
			port->speed = cmd->speed;
		if (cmd->autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}
	rc = mii_ethtool_sset(&priv->mii_if, cmd);
	mutex_unlock(&hw_priv->lock);
	return rc;
}
  5212. /**
  5213. * netdev_nway_reset - restart auto-negotiation
  5214. * @dev: Network device.
  5215. *
  5216. * This function restarts the PHY for auto-negotiation.
  5217. *
  5218. * Return 0 if successful; otherwise an error code.
  5219. */
  5220. static int netdev_nway_reset(struct net_device *dev)
  5221. {
  5222. struct dev_priv *priv = netdev_priv(dev);
  5223. struct dev_info *hw_priv = priv->adapter;
  5224. int rc;
  5225. mutex_lock(&hw_priv->lock);
  5226. rc = mii_nway_restart(&priv->mii_if);
  5227. mutex_unlock(&hw_priv->lock);
  5228. return rc;
  5229. }
  5230. /**
  5231. * netdev_get_link - get network device link status
  5232. * @dev: Network device.
  5233. *
  5234. * This function gets the link status from the PHY.
  5235. *
  5236. * Return true if PHY is linked and false otherwise.
  5237. */
  5238. static u32 netdev_get_link(struct net_device *dev)
  5239. {
  5240. struct dev_priv *priv = netdev_priv(dev);
  5241. int rc;
  5242. rc = mii_link_ok(&priv->mii_if);
  5243. return rc;
  5244. }
  5245. /**
  5246. * netdev_get_drvinfo - get network driver information
  5247. * @dev: Network device.
  5248. * @info: Ethtool driver info data structure.
  5249. *
  5250. * This procedure returns the driver information.
  5251. */
  5252. static void netdev_get_drvinfo(struct net_device *dev,
  5253. struct ethtool_drvinfo *info)
  5254. {
  5255. struct dev_priv *priv = netdev_priv(dev);
  5256. struct dev_info *hw_priv = priv->adapter;
  5257. strcpy(info->driver, DRV_NAME);
  5258. strcpy(info->version, DRV_VERSION);
  5259. strcpy(info->bus_info, pci_name(hw_priv->pdev));
  5260. }
/**
 * netdev_get_regs_len - get length of register dump
 * @dev: Network device.
 *
 * This function returns the length of the register dump.
 *
 * Return length of the register dump.
 */
/* Ranges of hardware registers included in the ethtool register dump. */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P, KS8842_SGCR7_P },
	{ KS8842_MACAR1_P, KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P, KS8842_P3ERCR_P },
	{ 0, 0 }
};

static int netdev_get_regs_len(struct net_device *dev)
{
	struct hw_regs *range = hw_regs_range;
	/* 0x40 bytes of PCI configuration space are dumped first. */
	int regs_len = 0x10 * sizeof(u32);

	/* Add each range, rounded up to whole 32-bit words. */
	while (range->end > range->start) {
		regs_len += (range->end - range->start + 3) / 4 * 4;
		range++;
	}
	return regs_len;
}
/**
 * netdev_get_regs - get register dump
 * @dev: Network device.
 * @regs: Ethtool registers data structure.
 * @ptr: Buffer to store the register values.
 *
 * This procedure dumps the register values in the provided buffer:
 * first 0x40 bytes of PCI configuration space, then every range listed
 * in hw_regs_range.
 */
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}
  5323. #define WOL_SUPPORT \
  5324. (WAKE_PHY | WAKE_MAGIC | \
  5325. WAKE_UCAST | WAKE_MCAST | \
  5326. WAKE_BCAST | WAKE_ARP)
/**
 * netdev_get_wol - get Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This procedure returns Wake-on-LAN support.
 */
static void netdev_get_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	wol->supported = hw_priv->wol_support;
	wol->wolopts = hw_priv->wol_enable;
	/* No SecureOn password support. */
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/**
 * netdev_set_wol - set Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This function sets Wake-on-LAN support.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/* Need to find a way to retrieve the device IP address.
	 * A hard-coded placeholder address is used in the meantime.
	 */
	u8 net_addr[] = { 192, 168, 1, 1 };

	/* Reject options the hardware does not support. */
	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}
  5368. /**
  5369. * netdev_get_msglevel - get debug message level
  5370. * @dev: Network device.
  5371. *
  5372. * This function returns current debug message level.
  5373. *
  5374. * Return current debug message flags.
  5375. */
  5376. static u32 netdev_get_msglevel(struct net_device *dev)
  5377. {
  5378. struct dev_priv *priv = netdev_priv(dev);
  5379. return priv->msg_enable;
  5380. }
  5381. /**
  5382. * netdev_set_msglevel - set debug message level
  5383. * @dev: Network device.
  5384. * @value: Debug message flags.
  5385. *
  5386. * This procedure sets debug message level.
  5387. */
  5388. static void netdev_set_msglevel(struct net_device *dev, u32 value)
  5389. {
  5390. struct dev_priv *priv = netdev_priv(dev);
  5391. priv->msg_enable = value;
  5392. }
  5393. /**
  5394. * netdev_get_eeprom_len - get EEPROM length
  5395. * @dev: Network device.
  5396. *
  5397. * This function returns the length of the EEPROM.
  5398. *
  5399. * Return length of the EEPROM.
  5400. */
  5401. static int netdev_get_eeprom_len(struct net_device *dev)
  5402. {
  5403. return EEPROM_SIZE * 2;
  5404. }
/**
 * netdev_get_eeprom - get EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Buffer to store the EEPROM data.
 *
 * This function dumps the EEPROM data in the provided buffer.
 *
 * Return 0 if successful; otherwise an error code.
 */
#define EEPROM_MAGIC 0x10A18842

static int netdev_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;

	/* Refresh the cached 16-bit words covering the requested range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);

	return 0;
}
/**
 * netdev_set_eeprom - write EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Data buffer.
 *
 * This function modifies the EEPROM data, writing back only the 16-bit
 * words that actually changed.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;

	/* Caller must present the magic to prove it knows the layout. */
	if (eeprom->magic != EEPROM_MAGIC)
		return 1;

	/* Refresh the cached words covering the affected byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);

	/* Merge the new bytes into a scratch copy, then write back only
	 * words that differ from the cached image.
	 */
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}
	return 0;
}
/**
 * netdev_get_pauseparam - get flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This procedure returns the PAUSE control flow settings.
 */
static void netdev_get_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* The PAUSE_FLOW_CTRL override means flow control is forced. */
	pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
	if (!hw->ksz_switch) {
		/* Read back from the DMA configuration. */
		pause->rx_pause =
			(hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
		pause->tx_pause =
			(hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
	} else {
		/* Read back from the switch control register. */
		pause->rx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
		pause->tx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
	}
}
/**
 * netdev_set_pauseparam - set flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This function sets the PAUSE control flow settings.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	/* hw_priv->lock serializes all hardware configuration changes. */
	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Auto mode: advertise pause ability through the PHY and
		 * clear the manual override.
		 */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Restart link setup so the new advertisement takes effect. */
		port_set_link_speed(port);
	} else {
		/* Manual mode: force the requested rx/tx pause settings. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);
	return 0;
}
/**
 * netdev_get_ringparam - get tx/rx ring parameters
 * @dev: Network device.
 * @ring: Ethtool RING settings data structure.
 *
 * This procedure returns the TX/RX ring settings.
 */
static void netdev_get_ringparam(struct net_device *dev,
	struct ethtool_ringparam *ring)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Report 512 (1 << 9) as the maximum ring size for both rings. */
	ring->tx_max_pending = (1 << 9);
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = (1 << 9);
	ring->rx_pending = hw->rx_desc_info.alloc;
}
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)

/* Names for the per-port hardware MIB counters reported via ethtool -S.
 * The order is assumed to match the counter order produced by
 * get_mib_counters() in netdev_get_ethtool_stats() -- verify when adding
 * entries.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "rx_discards" },
	{ "tx_discards" },
};
  5594. /**
  5595. * netdev_get_strings - get statistics identity strings
  5596. * @dev: Network device.
  5597. * @stringset: String set identifier.
  5598. * @buf: Buffer to store the strings.
  5599. *
  5600. * This procedure returns the strings used to identify the statistics.
  5601. */
  5602. static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  5603. {
  5604. struct dev_priv *priv = netdev_priv(dev);
  5605. struct dev_info *hw_priv = priv->adapter;
  5606. struct ksz_hw *hw = &hw_priv->hw;
  5607. if (ETH_SS_STATS == stringset)
  5608. memcpy(buf, &ethtool_stats_keys,
  5609. ETH_GSTRING_LEN * hw->mib_cnt);
  5610. }
  5611. /**
  5612. * netdev_get_sset_count - get statistics size
  5613. * @dev: Network device.
  5614. * @sset: The statistics set number.
  5615. *
  5616. * This function returns the size of the statistics to be reported.
  5617. *
  5618. * Return size of the statistics to be reported.
  5619. */
  5620. static int netdev_get_sset_count(struct net_device *dev, int sset)
  5621. {
  5622. struct dev_priv *priv = netdev_priv(dev);
  5623. struct dev_info *hw_priv = priv->adapter;
  5624. struct ksz_hw *hw = &hw_priv->hw;
  5625. switch (sset) {
  5626. case ETH_SS_STATS:
  5627. return hw->mib_cnt;
  5628. default:
  5629. return -EOPNOTSUPP;
  5630. }
  5631. }
/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev: Network device.
 * @stats: Ethtool statistics data structure.
 * @data: Buffer to store the statistics.
 *
 * This procedure returns the statistics.  Counter reads are performed by
 * the mib_read work item; this function requests reads for connected ports
 * (counter[].read = 1), schedules the work, and waits until the work marks
 * them complete (counter[].read = 2) or a timeout elapses.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	int rc;	/* NOTE(review): wait result is never checked -- the waits
		 * appear to be best-effort; confirm that is intended. */
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	mutex_lock(&hw_priv->lock);
	/* Request a MIB read for every connected port of this interface. */
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	/* n < SWITCH_PORT_NUM means at least one port requested a read. */
	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		/* Single port: wait only for that port's counters. */
		p = n;
		rc = wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		/* Multiple ports: wait longest for the first, then only for
		 * ports whose read is still in progress (cnt_ptr set).
		 */
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	/* Copy out as many counters as fit in the caller's buffer. */
	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
  5694. /**
  5695. * netdev_get_rx_csum - get receive checksum support
  5696. * @dev: Network device.
  5697. *
  5698. * This function gets receive checksum support setting.
  5699. *
  5700. * Return true if receive checksum is enabled; false otherwise.
  5701. */
  5702. static u32 netdev_get_rx_csum(struct net_device *dev)
  5703. {
  5704. struct dev_priv *priv = netdev_priv(dev);
  5705. struct dev_info *hw_priv = priv->adapter;
  5706. struct ksz_hw *hw = &hw_priv->hw;
  5707. return hw->rx_cfg &
  5708. (DMA_RX_CSUM_UDP |
  5709. DMA_RX_CSUM_TCP |
  5710. DMA_RX_CSUM_IP);
  5711. }
/**
 * netdev_set_rx_csum - set receive checksum support
 * @dev: Network device.
 * @data: Zero to disable receive checksum support.
 *
 * This function sets receive checksum support setting.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_rx_csum(struct net_device *dev, u32 data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	u32 new_setting = hw->rx_cfg;

	if (data)
		new_setting |=
			(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
			DMA_RX_CSUM_IP);
	else
		new_setting &=
			~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
			DMA_RX_CSUM_IP);

	/* UDP checksum offload is always kept off regardless of @data --
	 * presumably the hardware mishandles UDP packets in IP fragments;
	 * confirm against the datasheet.
	 */
	new_setting &= ~DMA_RX_CSUM_UDP;
	mutex_lock(&hw_priv->lock);
	if (new_setting != hw->rx_cfg) {
		hw->rx_cfg = new_setting;
		/* Push to hardware only while the DMA engine is enabled. */
		if (hw->enabled)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
	}
	mutex_unlock(&hw_priv->lock);
	return 0;
}
  5745. static struct ethtool_ops netdev_ethtool_ops = {
  5746. .get_settings = netdev_get_settings,
  5747. .set_settings = netdev_set_settings,
  5748. .nway_reset = netdev_nway_reset,
  5749. .get_link = netdev_get_link,
  5750. .get_drvinfo = netdev_get_drvinfo,
  5751. .get_regs_len = netdev_get_regs_len,
  5752. .get_regs = netdev_get_regs,
  5753. .get_wol = netdev_get_wol,
  5754. .set_wol = netdev_set_wol,
  5755. .get_msglevel = netdev_get_msglevel,
  5756. .set_msglevel = netdev_set_msglevel,
  5757. .get_eeprom_len = netdev_get_eeprom_len,
  5758. .get_eeprom = netdev_get_eeprom,
  5759. .set_eeprom = netdev_set_eeprom,
  5760. .get_pauseparam = netdev_get_pauseparam,
  5761. .set_pauseparam = netdev_set_pauseparam,
  5762. .get_ringparam = netdev_get_ringparam,
  5763. .get_strings = netdev_get_strings,
  5764. .get_sset_count = netdev_get_sset_count,
  5765. .get_ethtool_stats = netdev_get_ethtool_stats,
  5766. .get_rx_csum = netdev_get_rx_csum,
  5767. .set_rx_csum = netdev_set_rx_csum,
  5768. .get_tx_csum = ethtool_op_get_tx_csum,
  5769. .set_tx_csum = ethtool_op_set_tx_csum,
  5770. .get_sg = ethtool_op_get_sg,
  5771. .set_sg = ethtool_op_set_sg,
  5772. };
  5773. /*
  5774. * Hardware monitoring
  5775. */
  5776. static void update_link(struct net_device *dev, struct dev_priv *priv,
  5777. struct ksz_port *port)
  5778. {
  5779. if (priv->media_state != port->linked->state) {
  5780. priv->media_state = port->linked->state;
  5781. if (netif_running(dev)) {
  5782. if (media_connected == priv->media_state)
  5783. netif_carrier_on(dev);
  5784. else
  5785. netif_carrier_off(dev);
  5786. if (netif_msg_link(priv))
  5787. printk(KERN_INFO "%s link %s\n", dev->name,
  5788. (media_connected == priv->media_state ?
  5789. "on" : "off"));
  5790. }
  5791. }
  5792. }
/* Work item that reads the hardware MIB counters for all ports.  Scheduled
 * from mib_monitor() and from netdev_get_ethtool_stats(); uses the
 * counter[].read field as a small state machine (1 = read requested,
 * 2 = read complete, waiters woken).
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;
	int i;

	/* next_jiffies is defined elsewhere in this file; it staggers the
	 * per-port read deadlines so ports are not all read at once.
	 */
	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (jiffies >= hw_priv->counter[i].time) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
/* Timer callback driving periodic MIB counter reads and Wake-on-LAN PME
 * status housekeeping.
 */
static void mib_monitor(unsigned long ptr)
{
	struct dev_info *hw_priv = (struct dev_info *) ptr;

	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		/* Waiting period elapsed; clear the latched PME status. */
		if (hw_priv->pme_wait <= jiffies) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

		/* PME is asserted.  Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	/* Re-arm the periodic timer. */
	ksz_update_timer(&hw_priv->mib_timer_info);
}
  5845. /**
  5846. * dev_monitor - periodic monitoring
  5847. * @ptr: Network device pointer.
  5848. *
  5849. * This routine is run in a kernel timer to monitor the network device.
  5850. */
  5851. static void dev_monitor(unsigned long ptr)
  5852. {
  5853. struct net_device *dev = (struct net_device *) ptr;
  5854. struct dev_priv *priv = netdev_priv(dev);
  5855. struct dev_info *hw_priv = priv->adapter;
  5856. struct ksz_hw *hw = &hw_priv->hw;
  5857. struct ksz_port *port = &priv->port;
  5858. if (!(hw->features & LINK_INT_WORKING))
  5859. port_get_link_speed(port);
  5860. update_link(dev, priv, port);
  5861. ksz_update_timer(&priv->monitor_timer_info);
  5862. }
/*
 * Linux network device interface functions
 */

/* Driver exported variables */
static int msg_enable;

/* MAC address overrides in "xx:xx:xx:xx:xx:xx" form; the default ":"
 * means "not set" (see get_mac_addr()).
 */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports.  Some users like to take control of the
 * ports for running Spanning Tree Protocol.  The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast
 * packets are forwarded inside the switch and not passed to the host.
 * Only packets that need the host's attention are passed to it.  This
 * prevents the host wasting CPU time to examine each and every incoming
 * packet and do the forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot
 * compile with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch.  Not sure what situation
 * needs that.  However, fast aging is used to flush the dynamic MAC table
 * when STP support is enabled.
 */
static int fast_aging;
/**
 * netdev_init - initialize network device.
 * @dev: Network device.
 *
 * This function initializes the network device.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int __init netdev_init(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	/* 500 ms timeout */
	ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
		dev_monitor, dev);

	/* 500 ms timeout */
	dev->watchdog_timeo = HZ / 2;

	dev->features |= NETIF_F_IP_CSUM;

	/*
	 * Hardware does not really support IPv6 checksum generation, but
	 * driver actually runs faster with this on.  Refer IPV6_CSUM_GEN_HACK.
	 */
	dev->features |= NETIF_F_IPV6_CSUM;
	dev->features |= NETIF_F_SG;

	sema_init(&priv->proc_sem, 1);

	/* MII management: PHY id limited to 1 bit, register numbers to 3
	 * bits; PHY address is the port number plus one.
	 */
	priv->mii_if.phy_id_mask = 0x1;
	priv->mii_if.reg_num_mask = 0x7;
	priv->mii_if.dev = dev;
	priv->mii_if.mdio_read = mdio_read;
	priv->mii_if.mdio_write = mdio_write;
	priv->mii_if.phy_id = priv->port.first_port + 1;

	priv->msg_enable = netif_msg_init(msg_enable,
		(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));

	return 0;
}
/* Network device callbacks shared by all interfaces of this driver. */
static const struct net_device_ops netdev_ops = {
	.ndo_init = netdev_init,
	.ndo_open = netdev_open,
	.ndo_stop = netdev_close,
	.ndo_get_stats = netdev_query_statistics,
	.ndo_start_xmit = netdev_tx,
	.ndo_tx_timeout = netdev_tx_timeout,
	.ndo_change_mtu = netdev_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_do_ioctl = netdev_ioctl,
	.ndo_set_rx_mode = netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netdev_netpoll,
#endif
};
/* Unregister (if needed) and free a net_device. */
static void netdev_free(struct net_device *dev)
{
	/* watchdog_timeo is set in netdev_init (ndo_init), which runs from
	 * register_netdev -- so a nonzero value marks a registered device.
	 * NOTE(review): assumes nothing else sets watchdog_timeo; confirm.
	 */
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}
/* Per-PCI-device bookkeeping: the shared hardware state plus one
 * net_device per switch port (only index 0 is used in single-device mode).
 */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Number of network devices created so far; also used as the hardware id
 * (see pcidev_init).
 */
static int net_device_present;
/**
 * get_mac_addr - parse a MAC address module parameter
 * @hw_priv: Network device information.
 * @macaddr: Address string of hex digit pairs separated by ':'.
 * @port: MAIN_PORT selects the override address; anything else writes the
 *	switch's other_addr.
 *
 * Parses up to 6 bytes from @macaddr; parsing stops at the first character
 * that is neither a hex digit nor ':'.  When all 6 bytes parse for the
 * main port, mac_override is set.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;	/* 1 = digit(s) seen, 2 = current byte complete */
	int num;	/* value of the byte being accumulated */

	i = j = num = got_num = 0;
	while (j < MAC_ADDR_LEN) {
		if (macaddr[i]) {
			got_num = 1;
			if ('0' <= macaddr[i] && macaddr[i] <= '9')
				num = num * 16 + macaddr[i] - '0';
			else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
				num = num * 16 + 10 + macaddr[i] - 'A';
			else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
				num = num * 16 + 10 + macaddr[i] - 'a';
			else if (':' == macaddr[i])
				got_num = 2;
			else
				break;
		} else if (got_num)
			/* End of string completes the final byte. */
			got_num = 2;
		else
			break;
		if (2 == got_num) {
			/* NOTE(review): the [5] += hw.id adjustment executes
			 * once per parsed byte rather than once per address;
			 * looks suspicious -- confirm intended behavior.
			 */
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (MAC_ADDR_LEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
  6004. #define KS884X_DMA_MASK (~0x0UL)
  6005. static void read_other_addr(struct ksz_hw *hw)
  6006. {
  6007. int i;
  6008. u16 data[3];
  6009. struct ksz_switch *sw = hw->ksz_switch;
  6010. for (i = 0; i < 3; i++)
  6011. data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
  6012. if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
  6013. sw->other_addr[5] = (u8) data[0];
  6014. sw->other_addr[4] = (u8)(data[0] >> 8);
  6015. sw->other_addr[3] = (u8) data[1];
  6016. sw->other_addr[2] = (u8)(data[1] >> 8);
  6017. sw->other_addr[1] = (u8) data[2];
  6018. sw->other_addr[0] = (u8)(data[2] >> 8);
  6019. }
  6020. }
  6021. #ifndef PCI_VENDOR_ID_MICREL_KS
  6022. #define PCI_VENDOR_ID_MICREL_KS 0x16c6
  6023. #endif
/**
 * pcidev_init - PCI probe routine
 * @pdev: PCI device.
 * @id: Matched entry of pcidev_table.
 *
 * Maps the chip registers, detects KSZ8841 vs. KSZ8842, allocates the
 * shared hardware state and one or two network devices, and registers
 * them.  Errors unwind through the goto chain at the bottom.
 *
 * Return 0 if successful; otherwise a negative error code.
 */
static int __init pcidev_init(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[80];
	struct ksz_switch *sw = NULL;

	result = pci_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	/* Only 32-bit DMA addressing is supported. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return result;

	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	/* BAR 0 must be memory-mapped, not an I/O port range. */
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;
	info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;
	memset(info, 0, sizeof(struct platform_info));

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	/* hw_init returns the port count: 1 = KSZ8841, 2 = KSZ8842,
	 * 0 = chip not recognized.
	 */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			printk(KERN_ALERT "chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	/* NOTE(review): patches the chip digit into the banner; assumes the
	 * digit sits at offset 13 of the version string -- confirm.
	 */
	sprintf(banner, "%s\n", version);
	banner[13] = cnt + '0';
	ks_info(hw_priv, "%s", banner);
	ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kmalloc(sizeof(struct ksz_switch),
			GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;
		memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	/* tasklet is enabled. */
	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
		(unsigned long) hw_priv);
	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
		(unsigned long) hw_priv);

	/* tasklet_enable will decrement the atomic counter. */
	tasklet_disable(&hw_priv->rx_tasklet);
	tasklet_disable(&hw_priv->tx_tasklet);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ':' is the "not set" default of the macaddr module parameter. */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not
	 * overridden.
	 */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Wake-on-LAN support is only offered without the switch. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor, hw_priv);

	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;

		/* Second interface uses the other address; make it unique
		 * when it still equals the main one.
		 */
		if (MAIN_PORT == i)
			memcpy(dev->dev_addr, hw_priv->hw.override_addr,
				MAC_ADDR_LEN);
		else {
			memcpy(dev->dev_addr, sw->other_addr,
				MAC_ADDR_LEN);
			if (!memcmp(sw->other_addr, hw->override_addr,
					MAC_ADDR_LEN))
				dev->dev_addr[5] += port->first_port;
		}

		dev->netdev_ops = &netdev_ops;
		SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	/* Unwind any interfaces that were already allocated/registered. */
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}
/* PCI remove routine: tear down everything pcidev_init set up. */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	pci_set_drvdata(pdev, NULL);

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	/* Unregister and free every created network device. */
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	/* kfree(NULL) is a no-op, so this is safe for KSZ8841. */
	kfree(hw_priv->hw.ksz_switch);
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
  6229. #ifdef CONFIG_PM
/* PM resume: restore PCI state, disable wake, and reopen devices that
 * were running at suspend time.
 */
static int pcidev_resume(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* Disarm PME generation if Wake-on-LAN was armed at suspend. */
	if (hw_priv->wol_enable)
		hw_cfg_wol_pme(hw, 0);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netdev_open(dev);
				netif_device_attach(dev);
			}
		}
	}
	return 0;
}
/* PM suspend: close running devices, arm Wake-on-LAN if enabled, and put
 * the device into the target power state.
 */
static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Need to find a way to retrieve the device IP address. */
	u8 net_addr[] = { 192, 168, 1, 1 };

	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netif_device_detach(dev);
				netdev_close(dev);
			}
		}
	}
	if (hw_priv->wol_enable) {
		/* Program wake frames and enable PME assertion. */
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
		hw_cfg_wol_pme(hw, 1);
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
  6278. #endif
  6279. static char pcidev_name[] = "ksz884xp";
  6280. static struct pci_device_id pcidev_table[] = {
  6281. { PCI_VENDOR_ID_MICREL_KS, 0x8841,
  6282. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
  6283. { PCI_VENDOR_ID_MICREL_KS, 0x8842,
  6284. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
  6285. { 0 }
  6286. };
  6287. MODULE_DEVICE_TABLE(pci, pcidev_table);
/* PCI driver glue; the suspend/resume hooks exist only with CONFIG_PM. */
static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
	.suspend = pcidev_suspend,
	.resume = pcidev_resume,
#endif
	.name = pcidev_name,
	.id_table = pcidev_table,
	.probe = pcidev_init,
	.remove = pcidev_exit
};
/* Module entry point: register the PCI driver. */
static int __init ksz884x_init_module(void)
{
	return pci_register_driver(&pci_device_driver);
}

/* Module exit: unregister the PCI driver, removing all bound devices. */
static void __exit ksz884x_cleanup_module(void)
{
	pci_unregister_driver(&pci_device_driver);
}

module_init(ksz884x_init_module);
module_exit(ksz884x_cleanup_module);
MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

/* MAC address overrides for the first and (KSZ8842 multi-device mode)
 * second interface; see get_mac_addr() for the accepted format.
 */
module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");