/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#include <linux/log2.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.9.0"
#define DRV_MODULE_RELDATE	"Dec 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
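
/* NVRAM support table.  Each entry describes one supported flash/EEPROM
 * part: the leading hex words hold the strapping value and the command
 * register settings for that part, followed by the access-geometry
 * fields (page bits/size, byte address mask, total size) and a name.
 * The strapping read from the chip at probe time is matched against
 * these entries to pick the right access parameters; the matching
 * logic itself appears later in the driver.
 */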
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-buffered)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash) */
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-buffered)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
        .flags		= BNX2_NV_BUFFERED,
        .page_bits	= BCM5709_FLASH_PAGE_BITS,
        .page_size	= BCM5709_FLASH_PAGE_SIZE,
        .addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
        .name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
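
/* Return the number of free TX descriptors on a ring.  tx_prod and
 * tx_cons are free-running 16-bit indices, hence the masking below.
 * The smp_mb() orders the index reads against the queue-state updates
 * made by the transmit and completion paths (the usual lock-free
 * producer/consumer pairing; the matching barriers are elsewhere in
 * the driver, not a documented hardware requirement).
 */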
static inline u32
bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = txr->tx_prod - txr->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}
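
/* Indirect register access.  Registers outside the directly mapped
 * window are reached by writing the target offset to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and then reading or writing
 * BNX2_PCICFG_REG_WINDOW; indirect_lock serializes the two-step
 * sequence against other contexts.  The shared-memory helpers below
 * are the same mechanism rebased at bp->shmem_base.
 */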
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
        return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
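
/* MII/MDIO access to the PHY.  If the EMAC is auto-polling the PHY,
 * polling is turned off around the manual transaction and restored
 * afterwards.  A command is posted to BNX2_EMAC_MDIO_COMM with
 * START_BUSY set, and that bit is then polled (50 iterations of
 * udelay(10), i.e. roughly 500 us) until the hardware clears it;
 * -EBUSY is returned if it never does.
 */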
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        } else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
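
/* Interrupt control.  Each NAPI vector is masked or unmasked through
 * BNX2_PCICFG_INT_ACK_CMD using its int_num tag.  The trailing read in
 * bnx2_disable_int() flushes the posted writes; bnx2_enable_int()
 * additionally kicks the host coalescing block (COAL_NOW) so that a
 * status-block update and interrupt are generated immediately.
 */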
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
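
/* Mask interrupts and wait for any handler already running on another
 * CPU to finish.  intr_sem is bumped before masking so that a racing
 * handler, and bnx2_netif_start() below, can tell the device is being
 * quiesced; synchronize_irq() then drains each vector.
 */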
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
static void
bnx2_napi_disable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->irq_nvecs; i++)
                napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies;	/* prevent tx timeout */
        }
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                }
        }
}
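
/* Ring memory management.  The descriptor rings live in DMA-coherent
 * memory (pci_alloc_consistent) because the hardware reads and writes
 * them directly; the parallel software rings that track skbs and pages
 * are ordinary kzalloc/vmalloc memory seen only by the CPU.
 */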
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                if (txr->tx_desc_ring) {
                        pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
                                            txr->tx_desc_ring,
                                            txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
                txr->tx_buf_ring = NULL;
        }
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                                    rxr->rx_desc_ring[j],
                                                    rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                if (rxr->rx_buf_ring)
                        vfree(rxr->rx_buf_ring);
                rxr->rx_buf_ring = NULL;

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
                                pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                                    rxr->rx_pg_desc_ring[j],
                                                    rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                if (rxr->rx_pg_ring)
                        vfree(rxr->rx_pg_ring);
                rxr->rx_pg_ring = NULL;
        }
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
                if (txr->tx_buf_ring == NULL)
                        return -ENOMEM;

                txr->tx_desc_ring =
                        pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                             &txr->tx_desc_mapping);
                if (txr->tx_desc_ring == NULL)
                        return -ENOMEM;
        }
        return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;
                }

                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;
                }
        }
        return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
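
/* One DMA allocation covers both the status block(s) and the statistics
 * block: the status area comes first (one cache-aligned slot per MSI-X
 * vector when MSIX_CAP is set), and the statistics block is appended at
 * status_blk_size.  On the 5709 a separate per-page context block is
 * also needed (0x2000 bytes, allocated one BCM_PAGE_SIZE page at a
 * time).  Any failure unwinds through bnx2_free_mem().
 */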
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
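
/* Report the resolved link state to the on-chip firmware through the
 * shared-memory BNX2_LINK_STATUS word so that it sees the same speed,
 * duplex and autoneg result as the driver.  The BMSR is read twice
 * because its link bits are latched; the second read returns the
 * current state.
 */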
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        } else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
        return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
                ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
                 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        } else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        } else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
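
/* Resolve TX/RX pause from the autoneg results.  Flow control is only
 * meaningful on full-duplex links; the 5708 SerDes reports the already
 * resolved pause state directly in BCM5708S_1000X_STAT1.  For other
 * SerDes links the 1000Base-X pause bits are first remapped onto the
 * copper ADVERTISE_PAUSE_* layout so that the single resolution table
 * (802.3 Table 28B-3) below covers both media types.
 */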
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
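/* Per-chip SerDes link-up handlers: once link is reported up, read
 * the negotiated speed and duplex back from the chip-specific PHY
 * status registers.
 */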
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The 1000BASE-T ability bits in MII_STAT1000 are shifted
		 * left by 2 relative to MII_CTRL1000, so align them before
		 * masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
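/* Program the L2 context for an rx ring.  On the 5709, this also sets
 * the buffer watermarks used to generate pause frames when tx flow
 * control is enabled.
 */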
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
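/* Program the EMAC to match the resolved link settings: port mode
 * (MII/GMII/2.5G), duplex, and the rx/tx PAUSE enables.
 */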
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
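/* Advertise or withdraw 2.5G capability in the Over-1G page of the
 * SerDes PHY.  The return value tells the caller whether the bit was
 * already in the requested state, so it can decide whether the link
 * needs to be forced down.
 */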
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Only the 5708S and 5709S support forced 2.5G; bail out
		 * rather than write back an uninitialized BMCR value.
		 */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* See bnx2_enable_forced_2g5() above. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
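/* Poll the PHY and update the driver's link state.  Handles the
 * 5706S quirk of qualifying BMSR with the EMAC status, and falls
 * back to autoneg when a parallel-detected link goes down.
 */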
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
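/* Translate the requested flow control settings into pause
 * advertisement bits, using the 1000X form for SerDes and the
 * standard form for copper.
 */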
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
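/* Configure the SerDes PHY for forced speed or autonegotiation.
 * When a forced speed change is needed while the link is up, the
 * link is bounced first so the partner notices the change.
 */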
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
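/* Derive the default link settings from the values the bootcode
 * publishes in shared memory when a remote PHY is in use.
 */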
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
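/* Handle a link event from the firmware that manages the remote PHY:
 * decode speed, duplex, flow control, and port type from the shared
 * memory link status word.
 */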
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
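/* Per-chip PHY init routines follow: each applies the chip-specific
 * register setup and workarounds before the common bnx2_setup_phy()
 * (re)configures the link.
 */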
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
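/* Send a command to the bootcode through the driver mailbox and, if
 * requested, wait up to BNX2_FW_ACK_TIME_OUT_MS for the matching
 * sequence number to be acknowledged in the firmware mailbox.
 */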
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}

static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
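/* Initialize the on-chip context memory by zeroing all 96 contexts
 * through the context window registers, remapping vcids on 5706 A0.
 */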
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
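/* Work around bad on-chip rx buffer memory: allocate every mbuf from
 * the firmware pool, remember the good ones, and free only those back,
 * leaving the bad blocks permanently allocated.
 */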
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}

static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}

static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}

static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);
}

static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
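/* Reclaim completed tx buffers up to the hardware consumer index,
 * unmapping and freeing the skbs, and wake the tx queue if it was
 * stopped and enough descriptors are now available.
 */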
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}

static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
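/* Complete the rx of one packet into an skb.  For jumbo packets,
 * hdr_len bytes stay in the skb itself and the remainder is attached
 * as page fragments from the rx page ring; replacement buffers are
 * allocated first, and the old buffers are recycled on failure.
 */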
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}

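/* The last descriptor of every ring page is a link BD that chains to
 * the next page, so a raw status-block index that lands on
 * MAX_RX_DESC_CNT must be bumped past it to get the next usable
 * software index.
 */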
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}

static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		len -= 4;	/* strip the 4-byte frame CRC */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
							 new_skb->data,
							 len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;
		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;
}

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}

static inline int
bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
		return 1;
	return 0;
}

#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}

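/* Workaround for occasionally lost MSIs: if work has been pending
 * across two consecutive idle checks with no status index change, the
 * interrupt is presumed lost; toggling the MSI enable bit and calling
 * the ISR by hand kick-starts NAPI again.
 */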
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}

static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}

static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}

static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}

static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}

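/* Multicast filtering below uses 8 x 32-bit hash registers (256 bits).
 * The low byte of the little-endian CRC of the address selects one bit:
 * the top 3 bits of that byte pick the register, the low 5 bits pick
 * the bit.  Illustrative example (made-up CRC value): low byte 0xa7 ->
 * register (0xa7 & 0xe0) >> 5 = 5, bit 0xa7 & 0x1f = 7, so
 * mc_filter[5] |= 1 << 7.
 */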
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	uc_ptr = NULL;
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}

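/* The RV2P processors execute 64-bit instructions.  Each instruction
 * is written as two 32-bit halves (INSTR_HIGH then INSTR_LOW), and i
 * counts bytes, so i / 8 is the instruction index latched into the
 * PROCn_ADDR_CMD register.
 */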
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	     u32 rv2p_proc)
{
	int i;
	u32 val;

	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}

static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}

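/* Bring up the on-chip processors in order: both RV2P engines first,
 * then the RX, TX, TX patch-up, completion and command processors.
 * One vmalloc'ed scratch buffer is reused to decompress each
 * gzip-compressed firmware image before it is written to the chip.
 */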
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_com, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_cp, fw);

init_cpu_err:
	vfree(text);
	return rc;
}

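/* The pmcsr manipulation below relies on the standard PCI PM register
 * layout: PCI_PM_CTRL_STATE_MASK is the low two bits of PMCSR, so
 * "pmcsr |= 3" selects D3hot, and PCI_PM_CTRL_PME_ENABLE arms PME
 * generation for Wake-on-LAN.
 */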
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

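/* NVRAM access is arbitrated in hardware: the driver sets its request
 * bit in NVM_SW_ARB and then polls (up to NVRAM_TIMEOUT_COUNT * 5us)
 * for the matching grant bit before touching the flash interface.
 */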
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

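/* Non-buffered (page-addressed) flash parts do not use linear byte
 * offsets, so a linear offset is translated to
 * (page_number << page_bits) + byte_within_page.  Illustrative numbers
 * only: for a part with 264-byte pages addressed on 512-byte
 * boundaries (page_bits = 9), offset 1000 becomes (3 << 9) + 208 = 1744.
 */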
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}

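/* The flash interface only moves aligned 32-bit words, so an arbitrary
 * (offset, length) read is split into an unaligned head, a run of whole
 * dwords, and an unaligned tail.  Illustrative example: offset 5,
 * length 6 reads the dword at 4 (keeping the last 3 bytes), then the
 * dword at 8 (keeping the first 3 bytes).
 */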
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}

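/* Writes follow a read-modify-write pattern: unaligned head and tail
 * dwords are pre-read into start[]/end[] and merged into an aligned
 * bounce buffer.  For non-buffered flash, each affected page is
 * additionally read out in full, erased, and rewritten around the new
 * data.
 */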
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}

static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}

static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}

static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximately 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;

			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}

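/* One-time hardware bring-up after reset: program the DMA engine and
 * context memory, load all on-chip firmware, set the MAC address, MTU
 * and host-coalescing parameters, program the receive filter, then
 * hand the BNX2_DRV_MSG_CODE_RESET completion to the firmware and turn
 * on the remaining blocks.
 */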
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}

static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}

static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}

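/* The TX ring occupies a single page; its last descriptor is used as a
 * chain BD whose host address points back at the start of the same
 * page, making the ring circular from the hardware's point of view.
 */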
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
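
/* Fill every rx bd with the buffer size and START/END flags, then chain
 * the ring pages together: the last bd of each page points at the dma
 * address of the next page, and the final page points back to the first.
 */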
static void
bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
		     int num_rings)
{
	int i;
	struct rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
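
/* Bring up one rx ring: the normal bd ring, the optional page ring used
 * for jumbo frames, the chip-side ring context, and an initial fill of
 * rx buffers.  The resulting producer indices and byte sequence are then
 * posted to the chip's mailbox registers.
 */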
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}

static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
}
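
/* Return how many ring pages are needed to hold ring_size descriptors,
 * rounded up to a power of 2; the bit trick below assumes max_size is
 * itself a power of 2 and serves as the cap.
 */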
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	return max;
}
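
/* Compute the rx buffer geometry for the current MTU.  When a full frame
 * plus overhead no longer fits in one page (and jumbo pages work on this
 * chip), frames are split: the header portion lands in a small buffer
 * and the remainder is placed in the page ring.
 */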
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}

static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}

static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
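
/* Register self-test: walk a table of offsets, verifying that read/write
 * bits (rw_mask) can be cleared and set, and that read-only bits
 * (ro_mask) keep their value across writes.  Entries marked
 * BNX2_FL_NOT_5709 are skipped on 5709 parts; the original register
 * value is always restored.
 */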
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
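
/* Write each test pattern across the given window of internal memory via
 * the indirect register interface and verify the read-back.
 */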
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
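
/* Loopback self-test: transmit one self-addressed packet with either the
 * MAC or the PHY looped back, then check that exactly one packet comes
 * back, that the frame header reports no errors, and that the length and
 * payload match what was sent.
 */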
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_maps[0];

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}

#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
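
/* NVRAM self-test: check the 0x669955aa signature word, then CRC two
 * 0x100-byte blocks.  A CRC32 computed over data followed by its stored
 * CRC yields the well-known residual 0xdebb20e3 when the data is intact.
 */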
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
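
/* Interrupt self-test: kick the host coalescing block and poll the
 * status index in PCICFG_INT_ACK_CMD for up to ~100 ms; if it never
 * changes, no status block update (and hence no interrupt) occurred.
 */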
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}

static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
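
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware rx drop counter, applies the 5708 statistics workaround, and
 * runs the SerDes state machines.  Most of the work is skipped while
 * interrupts are held off via intr_sem.
 */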
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}

static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
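
/* Try to switch to MSI-X: set up the MSI-X table/PBA GRC windows and
 * request the full set of vectors.  MSI-X mode is committed only if the
 * allocation succeeds outright; there is no fallback negotiation for a
 * smaller vector count here.
 */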
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
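
/* Pick the interrupt mode in order of preference: MSI-X (roughly one
 * vector per cpu, capped at RX_MAX_RINGS), then one-shot or plain MSI,
 * then shared INTx.  Ring counts follow the vector count, with the tx
 * ring count rounded down to a power of 2.
 */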
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}

/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}

static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif

/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_maps[0];

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i + 1];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
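
/* The chip exports 64-bit counters as hi/lo register pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts only the low word is
 * used so the value still fits in an unsigned long.
 */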
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
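
/* Fold the chip's statistics block into the generic netdev counters.
 * tx_carrier_errors is reported as 0 on 5706 and 5708 A0 parts, where
 * the carrier-sense counter is not used.
 */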
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}

static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
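
/* Changing ring sizes requires a full reinit: quiesce and reset the
 * chip, free all buffers and ring memory, recompute the ring geometry,
 * then reallocate and restart if the device was running.
 */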
  5439. static int
  5440. bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
  5441. {
  5442. if (netif_running(bp->dev)) {
  5443. bnx2_netif_stop(bp);
  5444. bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
  5445. bnx2_free_skbs(bp);
  5446. bnx2_free_mem(bp);
  5447. }
  5448. bnx2_set_rx_ring_size(bp, rx);
  5449. bp->tx_ring_size = tx;
  5450. if (netif_running(bp->dev)) {
  5451. int rc;
  5452. rc = bnx2_alloc_mem(bp);
  5453. if (rc)
  5454. return rc;
  5455. bnx2_init_nic(bp, 0);
  5456. bnx2_netif_start(bp);
  5457. }
  5458. return 0;
  5459. }
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
	return rc;
}

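/* Flow control: the get handler reports the current autoneg and RX/TX
 * pause state; the set handler records the requested settings and, on a
 * running interface, renegotiates the link under phy_lock.
 */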
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}

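/* The ethtool statistics are described by parallel arrays, all indexed
 * 0..BNX2_NUM_STATS-1: the counter names below, the word offsets into the
 * hardware statistics block, and per-chip tables giving each counter's
 * width in bytes (0 marks counters skipped because of errata).
 */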
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

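/* The statistics block is read as an array of u32 words, so translate a
 * byte offset in struct statistics_block into a 32-bit word index.
 */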
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

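/* Run the ethtool self tests.  The offline tests (registers, memory,
 * loopback) stop the NIC and put the chip into diagnostic mode; the NIC
 * is then re-initialized and we wait up to 7 seconds for link to return.
 * The online tests (NVRAM, interrupt, link) run without stopping traffic.
 */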
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);
	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

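/* Copy the hardware counters into the u64 array ethtool expects.  Early
 * 5706/5708 steppings use the width table that zeroes out counters
 * affected by errata; 64-bit counters are assembled from two consecutive
 * 32-bit words, high word first.
 */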
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}

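/* Blink the port LED so the adapter can be located physically: take over
 * the LED via the MAC override, toggle between all-off and all-on every
 * 500 ms for roughly `data` seconds (default 2), then restore the saved
 * LED configuration.
 */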
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}

static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif

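/* Work out whether a 5709 port is copper or SerDes.  The bond ID
 * identifies single-media parts outright; for dual-media parts the PHY
 * strap (or its override) is decoded, and the strap-to-media mapping
 * differs between PCI function 0 and function 1.
 */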
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

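/* Detect the bus type, width and clock of a PCI/PCI-X device from the
 * misc status and clock control registers; the results feed bp->flags
 * and the probe-time bus string.
 */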
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}

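/* One-time board setup during probe: enable and map the PCI device,
 * detect chip and bus capabilities (PCIe/PCI-X, MSI/MSI-X), configure
 * DMA masks, read the bootcode version and permanent MAC address from
 * shared memory, and install default coalescing, ring and PHY settings.
 */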
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		/* rc would otherwise still be 0 from the DMA mask setup
		 * above, so set an error code before bailing out.
		 */
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

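/* Format a human-readable bus description, e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz", into the caller-supplied buffer.
 */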
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

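/* Attach a NAPI context to each possible MSI-X vector with a budget of
 * 64 packets per poll: vector 0 uses bnx2_poll, the rest bnx2_poll_msix.
 */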
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};

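/* PCI probe entry point: allocate the multiqueue net device, run the
 * board setup, hook up the netdev and ethtool operations, advertise
 * checksum/SG/TSO features, and register with the network stack.
 */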
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);