  1. /* bnx2x_main.c: Broadcom Everest network driver.
  2. *
  3. * Copyright (c) 2007-2010 Broadcom Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation.
  8. *
  9. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  10. * Written by: Eliezer Tamir
  11. * Based on code from Michael Chan's bnx2 driver
  12. * UDP CSUM errata workaround by Arik Gendelman
  13. * Slowpath and fastpath rework by Vladislav Zolotarov
  14. * Statistics and Link management by Yitchak Gertner
  15. *
  16. */
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/kernel.h>
  20. #include <linux/device.h> /* for dev_info() */
  21. #include <linux/timer.h>
  22. #include <linux/errno.h>
  23. #include <linux/ioport.h>
  24. #include <linux/slab.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/pci.h>
  28. #include <linux/init.h>
  29. #include <linux/netdevice.h>
  30. #include <linux/etherdevice.h>
  31. #include <linux/skbuff.h>
  32. #include <linux/dma-mapping.h>
  33. #include <linux/bitops.h>
  34. #include <linux/irq.h>
  35. #include <linux/delay.h>
  36. #include <asm/byteorder.h>
  37. #include <linux/time.h>
  38. #include <linux/ethtool.h>
  39. #include <linux/mii.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/ip.h>
  42. #include <net/tcp.h>
  43. #include <net/checksum.h>
  44. #include <net/ip6_checksum.h>
  45. #include <linux/workqueue.h>
  46. #include <linux/crc32.h>
  47. #include <linux/crc32c.h>
  48. #include <linux/prefetch.h>
  49. #include <linux/zlib.h>
  50. #include <linux/io.h>
  51. #include <linux/stringify.h>
  52. #include "bnx2x.h"
  53. #include "bnx2x_init.h"
  54. #include "bnx2x_init_ops.h"
  55. #include "bnx2x_dump.h"
  56. #define DRV_MODULE_VERSION "1.52.53-2"
  57. #define DRV_MODULE_RELDATE "2010/21/07"
  58. #define BNX2X_BC_VER 0x040200
  59. #include <linux/firmware.h>
  60. #include "bnx2x_fw_file_hdr.h"
  61. /* FW files */
  62. #define FW_FILE_VERSION \
  63. __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
  64. __stringify(BCM_5710_FW_MINOR_VERSION) "." \
  65. __stringify(BCM_5710_FW_REVISION_VERSION) "." \
  66. __stringify(BCM_5710_FW_ENGINEERING_VERSION)
  67. #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
  68. #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
  69. /* Time in jiffies before concluding the transmitter is hung */
  70. #define TX_TIMEOUT (5*HZ)
  71. static char version[] __devinitdata =
  72. "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
  73. DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  74. MODULE_AUTHOR("Eliezer Tamir");
  75. MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
  76. MODULE_LICENSE("GPL");
  77. MODULE_VERSION(DRV_MODULE_VERSION);
  78. MODULE_FIRMWARE(FW_FILE_NAME_E1);
  79. MODULE_FIRMWARE(FW_FILE_NAME_E1H);
  80. static int multi_mode = 1;
  81. module_param(multi_mode, int, 0);
  82. MODULE_PARM_DESC(multi_mode, " Multi queue mode "
  83. "(0 Disable; 1 Enable (default))");
  84. static int num_queues;
  85. module_param(num_queues, int, 0);
  86. MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
  87. " (default is as a number of CPUs)");
  88. static int disable_tpa;
  89. module_param(disable_tpa, int, 0);
  90. MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
  91. static int int_mode;
  92. module_param(int_mode, int, 0);
   93. MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
  94. "(1 INT#x; 2 MSI)");
  95. static int dropless_fc;
  96. module_param(dropless_fc, int, 0);
  97. MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
  98. static int poll;
  99. module_param(poll, int, 0);
  100. MODULE_PARM_DESC(poll, " Use polling (for debug)");
  101. static int mrrs = -1;
  102. module_param(mrrs, int, 0);
  103. MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
  104. static int debug;
  105. module_param(debug, int, 0);
  106. MODULE_PARM_DESC(debug, " Default debug msglevel");
  107. static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
  108. static struct workqueue_struct *bnx2x_wq;
  109. enum bnx2x_board_type {
  110. BCM57710 = 0,
  111. BCM57711 = 1,
  112. BCM57711E = 2,
  113. };
  114. /* indexed by board_type, above */
  115. static struct {
  116. char *name;
  117. } board_info[] __devinitdata = {
  118. { "Broadcom NetXtreme II BCM57710 XGb" },
  119. { "Broadcom NetXtreme II BCM57711 XGb" },
  120. { "Broadcom NetXtreme II BCM57711E XGb" }
  121. };
  122. static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
  123. { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
  124. { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
  125. { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
  126. { 0 }
  127. };
  128. MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
  129. /****************************************************************************
  130. * General service functions
  131. ****************************************************************************/
  132. /* used only at init
  133. * locking is done by mcp
  134. */
  135. void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
  136. {
  137. pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
  138. pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
  139. pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
  140. PCICFG_VENDOR_ID_OFFSET);
  141. }
  142. static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
  143. {
  144. u32 val;
  145. pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
  146. pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
  147. pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
  148. PCICFG_VENDOR_ID_OFFSET);
  149. return val;
  150. }
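/* Typical usage of the indirect accessors above (illustrative only): when
 * DMAE/BAR access is not yet available, a GRC register is reached through
 * the PCI config window and the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET:
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 */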
  151. static const u32 dmae_reg_go_c[] = {
  152. DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
  153. DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
  154. DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
  155. DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
  156. };
  157. /* copy command into DMAE command memory and set DMAE command go */
  158. static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
  159. int idx)
  160. {
  161. u32 cmd_offset;
  162. int i;
  163. cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
  164. for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
  165. REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
  166. DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
  167. idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
  168. }
  169. REG_WR(bp, dmae_reg_go_c[idx], 1);
  170. }
  171. void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
  172. u32 len32)
  173. {
  174. struct dmae_command dmae;
  175. u32 *wb_comp = bnx2x_sp(bp, wb_comp);
  176. int cnt = 200;
  177. if (!bp->dmae_ready) {
  178. u32 *data = bnx2x_sp(bp, wb_data[0]);
  179. DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
  180. " using indirect\n", dst_addr, len32);
  181. bnx2x_init_ind_wr(bp, dst_addr, data, len32);
  182. return;
  183. }
  184. memset(&dmae, 0, sizeof(struct dmae_command));
  185. dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  186. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  187. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  188. #ifdef __BIG_ENDIAN
  189. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  190. #else
  191. DMAE_CMD_ENDIANITY_DW_SWAP |
  192. #endif
  193. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  194. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  195. dmae.src_addr_lo = U64_LO(dma_addr);
  196. dmae.src_addr_hi = U64_HI(dma_addr);
  197. dmae.dst_addr_lo = dst_addr >> 2;
  198. dmae.dst_addr_hi = 0;
  199. dmae.len = len32;
  200. dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
  201. dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
  202. dmae.comp_val = DMAE_COMP_VAL;
  203. DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
  204. DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
  205. "dst_addr [%x:%08x (%08x)]\n"
  206. DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
  207. dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
  208. dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
  209. dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
  210. DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  211. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  212. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  213. mutex_lock(&bp->dmae_mutex);
  214. *wb_comp = 0;
  215. bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
  216. udelay(5);
  217. while (*wb_comp != DMAE_COMP_VAL) {
  218. DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
  219. if (!cnt) {
  220. BNX2X_ERR("DMAE timeout!\n");
  221. break;
  222. }
  223. cnt--;
  224. /* adjust delay for emulation/FPGA */
  225. if (CHIP_REV_IS_SLOW(bp))
  226. msleep(100);
  227. else
  228. udelay(5);
  229. }
  230. mutex_unlock(&bp->dmae_mutex);
  231. }
  232. void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
  233. {
  234. struct dmae_command dmae;
  235. u32 *wb_comp = bnx2x_sp(bp, wb_comp);
  236. int cnt = 200;
  237. if (!bp->dmae_ready) {
  238. u32 *data = bnx2x_sp(bp, wb_data[0]);
  239. int i;
  240. DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
  241. " using indirect\n", src_addr, len32);
  242. for (i = 0; i < len32; i++)
  243. data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
  244. return;
  245. }
  246. memset(&dmae, 0, sizeof(struct dmae_command));
  247. dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
  248. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  249. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  250. #ifdef __BIG_ENDIAN
  251. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  252. #else
  253. DMAE_CMD_ENDIANITY_DW_SWAP |
  254. #endif
  255. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  256. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  257. dmae.src_addr_lo = src_addr >> 2;
  258. dmae.src_addr_hi = 0;
  259. dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
  260. dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
  261. dmae.len = len32;
  262. dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
  263. dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
  264. dmae.comp_val = DMAE_COMP_VAL;
  265. DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
  266. DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
  267. "dst_addr [%x:%08x (%08x)]\n"
  268. DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
  269. dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
  270. dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
  271. dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
  272. mutex_lock(&bp->dmae_mutex);
  273. memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
  274. *wb_comp = 0;
  275. bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
  276. udelay(5);
  277. while (*wb_comp != DMAE_COMP_VAL) {
  278. if (!cnt) {
  279. BNX2X_ERR("DMAE timeout!\n");
  280. break;
  281. }
  282. cnt--;
  283. /* adjust delay for emulation/FPGA */
  284. if (CHIP_REV_IS_SLOW(bp))
  285. msleep(100);
  286. else
  287. udelay(5);
  288. }
  289. DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  290. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  291. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  292. mutex_unlock(&bp->dmae_mutex);
  293. }
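/* Write a buffer that may exceed the per-command DMAE limit by splitting it
 * into chunks of at most DMAE_LEN32_WR_MAX dwords; the byte offset advances
 * by dmae_wr_max * 4 while len is counted in dwords.
 */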
  294. void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
  295. u32 addr, u32 len)
  296. {
  297. int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
  298. int offset = 0;
  299. while (len > dmae_wr_max) {
  300. bnx2x_write_dmae(bp, phys_addr + offset,
  301. addr + offset, dmae_wr_max);
  302. offset += dmae_wr_max * 4;
  303. len -= dmae_wr_max;
  304. }
  305. bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
  306. }
  307. /* used only for slowpath so not inlined */
  308. static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
  309. {
  310. u32 wb_write[2];
  311. wb_write[0] = val_hi;
  312. wb_write[1] = val_lo;
  313. REG_WR_DMAE(bp, reg, wb_write, 2);
  314. }
  315. #ifdef USE_WB_RD
  316. static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
  317. {
  318. u32 wb_data[2];
  319. REG_RD_DMAE(bp, reg, wb_data, 2);
  320. return HILO_U64(wb_data[0], wb_data[1]);
  321. }
  322. #endif
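/* Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory and
 * print every recorded assert (four 32-bit rows per entry); returns the
 * number of asserts found.
 */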
  323. static int bnx2x_mc_assert(struct bnx2x *bp)
  324. {
  325. char last_idx;
  326. int i, rc = 0;
  327. u32 row0, row1, row2, row3;
  328. /* XSTORM */
  329. last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
  330. XSTORM_ASSERT_LIST_INDEX_OFFSET);
  331. if (last_idx)
  332. BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  333. /* print the asserts */
  334. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  335. row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  336. XSTORM_ASSERT_LIST_OFFSET(i));
  337. row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  338. XSTORM_ASSERT_LIST_OFFSET(i) + 4);
  339. row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  340. XSTORM_ASSERT_LIST_OFFSET(i) + 8);
  341. row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  342. XSTORM_ASSERT_LIST_OFFSET(i) + 12);
  343. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  344. BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  345. " 0x%08x 0x%08x 0x%08x\n",
  346. i, row3, row2, row1, row0);
  347. rc++;
  348. } else {
  349. break;
  350. }
  351. }
  352. /* TSTORM */
  353. last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
  354. TSTORM_ASSERT_LIST_INDEX_OFFSET);
  355. if (last_idx)
  356. BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  357. /* print the asserts */
  358. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  359. row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  360. TSTORM_ASSERT_LIST_OFFSET(i));
  361. row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  362. TSTORM_ASSERT_LIST_OFFSET(i) + 4);
  363. row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  364. TSTORM_ASSERT_LIST_OFFSET(i) + 8);
  365. row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  366. TSTORM_ASSERT_LIST_OFFSET(i) + 12);
  367. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  368. BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  369. " 0x%08x 0x%08x 0x%08x\n",
  370. i, row3, row2, row1, row0);
  371. rc++;
  372. } else {
  373. break;
  374. }
  375. }
  376. /* CSTORM */
  377. last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
  378. CSTORM_ASSERT_LIST_INDEX_OFFSET);
  379. if (last_idx)
  380. BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  381. /* print the asserts */
  382. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  383. row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  384. CSTORM_ASSERT_LIST_OFFSET(i));
  385. row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  386. CSTORM_ASSERT_LIST_OFFSET(i) + 4);
  387. row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  388. CSTORM_ASSERT_LIST_OFFSET(i) + 8);
  389. row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  390. CSTORM_ASSERT_LIST_OFFSET(i) + 12);
  391. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  392. BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  393. " 0x%08x 0x%08x 0x%08x\n",
  394. i, row3, row2, row1, row0);
  395. rc++;
  396. } else {
  397. break;
  398. }
  399. }
  400. /* USTORM */
  401. last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
  402. USTORM_ASSERT_LIST_INDEX_OFFSET);
  403. if (last_idx)
  404. BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  405. /* print the asserts */
  406. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  407. row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
  408. USTORM_ASSERT_LIST_OFFSET(i));
  409. row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
  410. USTORM_ASSERT_LIST_OFFSET(i) + 4);
  411. row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
  412. USTORM_ASSERT_LIST_OFFSET(i) + 8);
  413. row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
  414. USTORM_ASSERT_LIST_OFFSET(i) + 12);
  415. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  416. BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
  417. " 0x%08x 0x%08x 0x%08x\n",
  418. i, row3, row2, row1, row0);
  419. rc++;
  420. } else {
  421. break;
  422. }
  423. }
  424. return rc;
  425. }
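/* Dump the MCP scratchpad trace: read the current mark from a fixed offset
 * below shmem_base, then print the cyclic buffer in two passes
 * (mark..shmem_base, then start..mark) as NUL-terminated 32-byte chunks.
 */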
  426. static void bnx2x_fw_dump(struct bnx2x *bp)
  427. {
  428. u32 addr;
  429. u32 mark, offset;
  430. __be32 data[9];
  431. int word;
  432. if (BP_NOMCP(bp)) {
  433. BNX2X_ERR("NO MCP - can not dump\n");
  434. return;
  435. }
  436. addr = bp->common.shmem_base - 0x0800 + 4;
  437. mark = REG_RD(bp, addr);
  438. mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
  439. pr_err("begin fw dump (mark 0x%x)\n", mark);
  440. pr_err("");
  441. for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
  442. for (word = 0; word < 8; word++)
  443. data[word] = htonl(REG_RD(bp, offset + 4*word));
  444. data[8] = 0x0;
  445. pr_cont("%s", (char *)data);
  446. }
  447. for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
  448. for (word = 0; word < 8; word++)
  449. data[word] = htonl(REG_RD(bp, offset + 4*word));
  450. data[8] = 0x0;
  451. pr_cont("%s", (char *)data);
  452. }
  453. pr_err("end of fw dump\n");
  454. }
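/* Crash-dump helper: disable statistics, print the default and per-queue
 * indices, walk the Rx/Tx rings around the current consumers, then append
 * the firmware trace and the STORM assert lists.
 */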
  455. static void bnx2x_panic_dump(struct bnx2x *bp)
  456. {
  457. int i;
  458. u16 j, start, end;
  459. bp->stats_state = STATS_STATE_DISABLED;
  460. DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
  461. BNX2X_ERR("begin crash dump -----------------\n");
  462. /* Indices */
  463. /* Common */
  464. BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
  465. " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
  466. " spq_prod_idx(0x%x)\n",
  467. bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
  468. bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
  469. /* Rx */
  470. for_each_queue(bp, i) {
  471. struct bnx2x_fastpath *fp = &bp->fp[i];
  472. BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
  473. " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
  474. " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
  475. i, fp->rx_bd_prod, fp->rx_bd_cons,
  476. le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
  477. fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
  478. BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
  479. " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
  480. fp->rx_sge_prod, fp->last_max_sge,
  481. le16_to_cpu(fp->fp_u_idx),
  482. fp->status_blk->u_status_block.status_block_index);
  483. }
  484. /* Tx */
  485. for_each_queue(bp, i) {
  486. struct bnx2x_fastpath *fp = &bp->fp[i];
  487. BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
  488. " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
  489. " *tx_cons_sb(0x%x)\n",
  490. i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
  491. fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
  492. BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
  493. " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
  494. fp->status_blk->c_status_block.status_block_index,
  495. fp->tx_db.data.prod);
  496. }
  497. /* Rings */
  498. /* Rx */
  499. for_each_queue(bp, i) {
  500. struct bnx2x_fastpath *fp = &bp->fp[i];
  501. start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
  502. end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
  503. for (j = start; j != end; j = RX_BD(j + 1)) {
  504. u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
  505. struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
  506. BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
  507. i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
  508. }
  509. start = RX_SGE(fp->rx_sge_prod);
  510. end = RX_SGE(fp->last_max_sge);
  511. for (j = start; j != end; j = RX_SGE(j + 1)) {
  512. u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
  513. struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
  514. BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
  515. i, j, rx_sge[1], rx_sge[0], sw_page->page);
  516. }
  517. start = RCQ_BD(fp->rx_comp_cons - 10);
  518. end = RCQ_BD(fp->rx_comp_cons + 503);
  519. for (j = start; j != end; j = RCQ_BD(j + 1)) {
  520. u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
  521. BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
  522. i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
  523. }
  524. }
  525. /* Tx */
  526. for_each_queue(bp, i) {
  527. struct bnx2x_fastpath *fp = &bp->fp[i];
  528. start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
  529. end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
  530. for (j = start; j != end; j = TX_BD(j + 1)) {
  531. struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
  532. BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
  533. i, j, sw_bd->skb, sw_bd->first_bd);
  534. }
  535. start = TX_BD(fp->tx_bd_cons - 10);
  536. end = TX_BD(fp->tx_bd_cons + 254);
  537. for (j = start; j != end; j = TX_BD(j + 1)) {
  538. u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
  539. BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
  540. i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
  541. }
  542. }
  543. bnx2x_fw_dump(bp);
  544. bnx2x_mc_assert(bp);
  545. BNX2X_ERR("end crash dump -----------------\n");
  546. }
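/* Program the HC configuration for this port according to the active
 * interrupt mode (MSI-X, MSI or INTx); on E1H also set the leading/trailing
 * edge masks (per-VN, plus NIG and GPIO3 attentions for the PMF).
 */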
  547. static void bnx2x_int_enable(struct bnx2x *bp)
  548. {
  549. int port = BP_PORT(bp);
  550. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  551. u32 val = REG_RD(bp, addr);
  552. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  553. int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
  554. if (msix) {
  555. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  556. HC_CONFIG_0_REG_INT_LINE_EN_0);
  557. val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  558. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  559. } else if (msi) {
  560. val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
  561. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  562. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  563. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  564. } else {
  565. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  566. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  567. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  568. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  569. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  570. val, port, addr);
  571. REG_WR(bp, addr, val);
  572. val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
  573. }
  574. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
  575. val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  576. REG_WR(bp, addr, val);
  577. /*
  578. * Ensure that HC_CONFIG is written before leading/trailing edge config
  579. */
  580. mmiowb();
  581. barrier();
  582. if (CHIP_IS_E1H(bp)) {
  583. /* init leading/trailing edge */
  584. if (IS_E1HMF(bp)) {
  585. val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
  586. if (bp->port.pmf)
  587. /* enable nig and gpio3 attention */
  588. val |= 0x1100;
  589. } else
  590. val = 0xffff;
  591. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  592. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  593. }
  594. /* Make sure that interrupts are indeed enabled from here on */
  595. mmiowb();
  596. }
  597. static void bnx2x_int_disable(struct bnx2x *bp)
  598. {
  599. int port = BP_PORT(bp);
  600. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  601. u32 val = REG_RD(bp, addr);
  602. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  603. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  604. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  605. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  606. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  607. val, port, addr);
  608. /* flush all outstanding writes */
  609. mmiowb();
  610. REG_WR(bp, addr, val);
  611. if (REG_RD(bp, addr) != val)
  612. BNX2X_ERR("BUG! proper val not read from IGU!\n");
  613. }
  614. static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  615. {
  616. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  617. int i, offset;
  618. /* disable interrupt handling */
  619. atomic_inc(&bp->intr_sem);
  620. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  621. if (disable_hw)
  622. /* prevent the HW from sending interrupts */
  623. bnx2x_int_disable(bp);
  624. /* make sure all ISRs are done */
  625. if (msix) {
  626. synchronize_irq(bp->msix_table[0].vector);
  627. offset = 1;
  628. #ifdef BCM_CNIC
  629. offset++;
  630. #endif
  631. for_each_queue(bp, i)
  632. synchronize_irq(bp->msix_table[i + offset].vector);
  633. } else
  634. synchronize_irq(bp->pdev->irq);
  635. /* make sure sp_task is not running */
  636. cancel_delayed_work(&bp->sp_task);
  637. flush_workqueue(bnx2x_wq);
  638. }
  639. /* fast path */
  640. /*
  641. * General service functions
  642. */
   643. /* Return true if the lock was acquired successfully */
  644. static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
  645. {
  646. u32 lock_status;
  647. u32 resource_bit = (1 << resource);
  648. int func = BP_FUNC(bp);
  649. u32 hw_lock_control_reg;
  650. DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
  651. /* Validating that the resource is within range */
  652. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  653. DP(NETIF_MSG_HW,
  654. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  655. resource, HW_LOCK_MAX_RESOURCE_VALUE);
   656. return false;
  657. }
  658. if (func <= 5)
  659. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  660. else
  661. hw_lock_control_reg =
  662. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  663. /* Try to acquire the lock */
  664. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  665. lock_status = REG_RD(bp, hw_lock_control_reg);
  666. if (lock_status & resource_bit)
  667. return true;
  668. DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
  669. return false;
  670. }
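/* Acknowledge a status block to the IGU: pack the new SB index together with
 * the SB id, storm id, update flag and interrupt mode into a single 32-bit
 * write to the HC command register of this port.
 */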
  671. static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
  672. u8 storm, u16 index, u8 op, u8 update)
  673. {
  674. u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
  675. COMMAND_REG_INT_ACK);
  676. struct igu_ack_register igu_ack;
  677. igu_ack.status_block_index = index;
  678. igu_ack.sb_id_and_flags =
  679. ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
  680. (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
  681. (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
  682. (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
  683. DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
  684. (*(u32 *)&igu_ack), hc_addr);
  685. REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
  686. /* Make sure that ACK is written */
  687. mmiowb();
  688. barrier();
  689. }
  690. static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
  691. {
  692. struct host_status_block *fpsb = fp->status_blk;
  693. barrier(); /* status block is written to by the chip */
  694. fp->fp_c_idx = fpsb->c_status_block.status_block_index;
  695. fp->fp_u_idx = fpsb->u_status_block.status_block_index;
  696. }
  697. static u16 bnx2x_ack_int(struct bnx2x *bp)
  698. {
  699. u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
  700. COMMAND_REG_SIMD_MASK);
  701. u32 result = REG_RD(bp, hc_addr);
  702. DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
  703. result, hc_addr);
  704. return result;
  705. }
  706. /*
  707. * fast path service functions
  708. */
  709. static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
  710. {
  711. /* Tell compiler that consumer and producer can change */
  712. barrier();
  713. return (fp->tx_pkt_prod != fp->tx_pkt_cons);
  714. }
  715. /* free skb in the packet ring at pos idx
  716. * return idx of last bd freed
  717. */
  718. static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  719. u16 idx)
  720. {
  721. struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
  722. struct eth_tx_start_bd *tx_start_bd;
  723. struct eth_tx_bd *tx_data_bd;
  724. struct sk_buff *skb = tx_buf->skb;
  725. u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
  726. int nbd;
   727. /* prefetch skb end pointer to speed up dev_kfree_skb() */
  728. prefetch(&skb->end);
  729. DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
  730. idx, tx_buf, skb);
  731. /* unmap first bd */
  732. DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
  733. tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
  734. dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
   735. BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
  736. nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
  737. #ifdef BNX2X_STOP_ON_ERROR
  738. if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
  739. BNX2X_ERR("BAD nbd!\n");
  740. bnx2x_panic();
  741. }
  742. #endif
  743. new_cons = nbd + tx_buf->first_bd;
  744. /* Get the next bd */
  745. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  746. /* Skip a parse bd... */
  747. --nbd;
  748. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  749. /* ...and the TSO split header bd since they have no mapping */
  750. if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
  751. --nbd;
  752. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  753. }
  754. /* now free frags */
  755. while (nbd > 0) {
  756. DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
  757. tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
  758. dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
  759. BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
  760. if (--nbd)
  761. bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
  762. }
  763. /* release skb */
  764. WARN_ON(!skb);
  765. dev_kfree_skb(skb);
  766. tx_buf->first_bd = 0;
  767. tx_buf->skb = NULL;
  768. return new_cons;
  769. }
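/* Tx BDs still available to start_xmit(); the NUM_TX_RINGS "next-page" BDs
 * are counted as used so that they are never handed out.
 */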
  770. static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
  771. {
  772. s16 used;
  773. u16 prod;
  774. u16 cons;
  775. prod = fp->tx_bd_prod;
  776. cons = fp->tx_bd_cons;
  777. /* NUM_TX_RINGS = number of "next-page" entries
  778. It will be used as a threshold */
  779. used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
  780. #ifdef BNX2X_STOP_ON_ERROR
  781. WARN_ON(used < 0);
  782. WARN_ON(used > fp->bp->tx_ring_size);
  783. WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
  784. #endif
  785. return (s16)(fp->bp->tx_ring_size) - used;
  786. }
  787. static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
  788. {
  789. u16 hw_cons;
  790. /* Tell compiler that status block fields can change */
  791. barrier();
  792. hw_cons = le16_to_cpu(*fp->tx_cons_sb);
  793. return hw_cons != fp->tx_pkt_cons;
  794. }
  795. static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
  796. {
  797. struct bnx2x *bp = fp->bp;
  798. struct netdev_queue *txq;
  799. u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
  800. #ifdef BNX2X_STOP_ON_ERROR
  801. if (unlikely(bp->panic))
  802. return -1;
  803. #endif
  804. txq = netdev_get_tx_queue(bp->dev, fp->index);
  805. hw_cons = le16_to_cpu(*fp->tx_cons_sb);
  806. sw_cons = fp->tx_pkt_cons;
  807. while (sw_cons != hw_cons) {
  808. u16 pkt_cons;
  809. pkt_cons = TX_BD(sw_cons);
  810. /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
  811. DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
  812. hw_cons, sw_cons, pkt_cons);
  813. /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
  814. rmb();
  815. prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
  816. }
  817. */
  818. bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
  819. sw_cons++;
  820. }
  821. fp->tx_pkt_cons = sw_cons;
  822. fp->tx_bd_cons = bd_cons;
  823. /* Need to make the tx_bd_cons update visible to start_xmit()
  824. * before checking for netif_tx_queue_stopped(). Without the
  825. * memory barrier, there is a small possibility that
  826. * start_xmit() will miss it and cause the queue to be stopped
  827. * forever.
  828. */
  829. smp_mb();
  830. /* TBD need a thresh? */
  831. if (unlikely(netif_tx_queue_stopped(txq))) {
  832. /* Taking tx_lock() is needed to prevent reenabling the queue
   833. * while it's empty. This could have happened if rx_action() gets
  834. * suspended in bnx2x_tx_int() after the condition before
  835. * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
  836. *
  837. * stops the queue->sees fresh tx_bd_cons->releases the queue->
  838. * sends some packets consuming the whole queue again->
  839. * stops the queue
  840. */
  841. __netif_tx_lock(txq, smp_processor_id());
  842. if ((netif_tx_queue_stopped(txq)) &&
  843. (bp->state == BNX2X_STATE_OPEN) &&
  844. (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
  845. netif_tx_wake_queue(txq);
  846. __netif_tx_unlock(txq);
  847. }
  848. return 0;
  849. }
  850. #ifdef BCM_CNIC
  851. static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
  852. #endif
  853. static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
  854. union eth_rx_cqe *rr_cqe)
  855. {
  856. struct bnx2x *bp = fp->bp;
  857. int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  858. int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  859. DP(BNX2X_MSG_SP,
  860. "fp %d cid %d got ramrod #%d state is %x type is %d\n",
  861. fp->index, cid, command, bp->state,
  862. rr_cqe->ramrod_cqe.ramrod_type);
  863. bp->spq_left++;
  864. if (fp->index) {
  865. switch (command | fp->state) {
  866. case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
  867. BNX2X_FP_STATE_OPENING):
  868. DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
  869. cid);
  870. fp->state = BNX2X_FP_STATE_OPEN;
  871. break;
  872. case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
  873. DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
  874. cid);
  875. fp->state = BNX2X_FP_STATE_HALTED;
  876. break;
  877. default:
  878. BNX2X_ERR("unexpected MC reply (%d) "
  879. "fp[%d] state is %x\n",
  880. command, fp->index, fp->state);
  881. break;
  882. }
  883. mb(); /* force bnx2x_wait_ramrod() to see the change */
  884. return;
  885. }
  886. switch (command | bp->state) {
  887. case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
  888. DP(NETIF_MSG_IFUP, "got setup ramrod\n");
  889. bp->state = BNX2X_STATE_OPEN;
  890. break;
  891. case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
  892. DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
  893. bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
  894. fp->state = BNX2X_FP_STATE_HALTED;
  895. break;
  896. case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
  897. DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
  898. bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
  899. break;
  900. #ifdef BCM_CNIC
  901. case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
  902. DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
  903. bnx2x_cnic_cfc_comp(bp, cid);
  904. break;
  905. #endif
  906. case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
  907. case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
  908. DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
  909. bp->set_mac_pending--;
  910. smp_wmb();
  911. break;
  912. case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
  913. DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
  914. bp->set_mac_pending--;
  915. smp_wmb();
  916. break;
  917. default:
  918. BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
  919. command, bp->state);
  920. break;
  921. }
  922. mb(); /* force bnx2x_wait_ramrod() to see the change */
  923. }
  924. static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
  925. struct bnx2x_fastpath *fp, u16 index)
  926. {
  927. struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
  928. struct page *page = sw_buf->page;
  929. struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
  930. /* Skip "next page" elements */
  931. if (!page)
  932. return;
  933. dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
   934. SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
  935. __free_pages(page, PAGES_PER_SGE_SHIFT);
  936. sw_buf->page = NULL;
  937. sge->addr_hi = 0;
  938. sge->addr_lo = 0;
  939. }
  940. static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
  941. struct bnx2x_fastpath *fp, int last)
  942. {
  943. int i;
  944. for (i = 0; i < last; i++)
  945. bnx2x_free_rx_sge(bp, fp, i);
  946. }
  947. static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
  948. struct bnx2x_fastpath *fp, u16 index)
  949. {
  950. struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
  951. struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
  952. struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
  953. dma_addr_t mapping;
  954. if (unlikely(page == NULL))
  955. return -ENOMEM;
  956. mapping = dma_map_page(&bp->pdev->dev, page, 0,
  957. SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
  958. if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
  959. __free_pages(page, PAGES_PER_SGE_SHIFT);
  960. return -ENOMEM;
  961. }
  962. sw_buf->page = page;
  963. dma_unmap_addr_set(sw_buf, mapping, mapping);
  964. sge->addr_hi = cpu_to_le32(U64_HI(mapping));
  965. sge->addr_lo = cpu_to_le32(U64_LO(mapping));
  966. return 0;
  967. }
  968. static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
  969. struct bnx2x_fastpath *fp, u16 index)
  970. {
  971. struct sk_buff *skb;
  972. struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
  973. struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
  974. dma_addr_t mapping;
  975. skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  976. if (unlikely(skb == NULL))
  977. return -ENOMEM;
  978. mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
  979. DMA_FROM_DEVICE);
  980. if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
  981. dev_kfree_skb(skb);
  982. return -ENOMEM;
  983. }
  984. rx_buf->skb = skb;
  985. dma_unmap_addr_set(rx_buf, mapping, mapping);
  986. rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  987. rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  988. return 0;
  989. }
   990. /* Note that we are not allocating a new skb:
   991. * we are just moving one from cons to prod.
   992. * We are not creating a new mapping,
   993. * so there is no need to check for dma_mapping_error().
   994. */
  995. static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
  996. struct sk_buff *skb, u16 cons, u16 prod)
  997. {
  998. struct bnx2x *bp = fp->bp;
  999. struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
  1000. struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
  1001. struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
  1002. struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
  1003. dma_sync_single_for_device(&bp->pdev->dev,
  1004. dma_unmap_addr(cons_rx_buf, mapping),
  1005. RX_COPY_THRESH, DMA_FROM_DEVICE);
  1006. prod_rx_buf->skb = cons_rx_buf->skb;
  1007. dma_unmap_addr_set(prod_rx_buf, mapping,
  1008. dma_unmap_addr(cons_rx_buf, mapping));
  1009. *prod_bd = *cons_bd;
  1010. }
  1011. static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
  1012. u16 idx)
  1013. {
  1014. u16 last_max = fp->last_max_sge;
  1015. if (SUB_S16(idx, last_max) > 0)
  1016. fp->last_max_sge = idx;
  1017. }
  1018. static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
  1019. {
  1020. int i, j;
  1021. for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
  1022. int idx = RX_SGE_CNT * i - 1;
  1023. for (j = 0; j < 2; j++) {
  1024. SGE_MASK_CLEAR_BIT(fp, idx);
  1025. idx--;
  1026. }
  1027. }
  1028. }
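/* Advance the SGE producer after an aggregation: clear the mask bits of the
 * pages the FW consumed, then move rx_sge_prod forward over fully consumed
 * 64-bit mask elements and clear the page-end ("next") entries again.
 */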
  1029. static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
  1030. struct eth_fast_path_rx_cqe *fp_cqe)
  1031. {
  1032. struct bnx2x *bp = fp->bp;
  1033. u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
  1034. le16_to_cpu(fp_cqe->len_on_bd)) >>
  1035. SGE_PAGE_SHIFT;
  1036. u16 last_max, last_elem, first_elem;
  1037. u16 delta = 0;
  1038. u16 i;
  1039. if (!sge_len)
  1040. return;
  1041. /* First mark all used pages */
  1042. for (i = 0; i < sge_len; i++)
  1043. SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
  1044. DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
  1045. sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
  1046. /* Here we assume that the last SGE index is the biggest */
  1047. prefetch((void *)(fp->sge_mask));
  1048. bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
  1049. last_max = RX_SGE(fp->last_max_sge);
  1050. last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
  1051. first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
  1052. /* If ring is not full */
  1053. if (last_elem + 1 != first_elem)
  1054. last_elem++;
  1055. /* Now update the prod */
  1056. for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
  1057. if (likely(fp->sge_mask[i]))
  1058. break;
  1059. fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
  1060. delta += RX_SGE_MASK_ELEM_SZ;
  1061. }
  1062. if (delta > 0) {
  1063. fp->rx_sge_prod += delta;
  1064. /* clear page-end entries */
  1065. bnx2x_clear_sge_mask_next_elems(fp);
  1066. }
  1067. DP(NETIF_MSG_RX_STATUS,
  1068. "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
  1069. fp->last_max_sge, fp->rx_sge_prod);
  1070. }
  1071. static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
  1072. {
  1073. /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
  1074. memset(fp->sge_mask, 0xff,
  1075. (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
   1076. /* Clear the last two indices in the page to 1:
  1077. these are the indices that correspond to the "next" element,
  1078. hence will never be indicated and should be removed from
  1079. the calculations. */
  1080. bnx2x_clear_sge_mask_next_elems(fp);
  1081. }
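/* TPA start: move the spare skb from the per-queue TPA pool to the producer
 * BD (mapping it for the device), park the still-mapped partially filled
 * cons skb in the pool, and mark the bin BNX2X_TPA_START.
 */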
  1082. static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  1083. struct sk_buff *skb, u16 cons, u16 prod)
  1084. {
  1085. struct bnx2x *bp = fp->bp;
  1086. struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
  1087. struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
  1088. struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
  1089. dma_addr_t mapping;
  1090. /* move empty skb from pool to prod and map it */
  1091. prod_rx_buf->skb = fp->tpa_pool[queue].skb;
  1092. mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
  1093. bp->rx_buf_size, DMA_FROM_DEVICE);
  1094. dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
  1095. /* move partial skb from cons to pool (don't unmap yet) */
  1096. fp->tpa_pool[queue] = *cons_rx_buf;
  1097. /* mark bin state as start - print error if current state != stop */
  1098. if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
  1099. BNX2X_ERR("start of bin not in stop [%d]\n", queue);
  1100. fp->tpa_state[queue] = BNX2X_TPA_START;
  1101. /* point prod_bd to new skb */
  1102. prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  1103. prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  1104. #ifdef BNX2X_STOP_ON_ERROR
  1105. fp->tpa_queue_used |= (1 << queue);
  1106. #ifdef _ASM_GENERIC_INT_L64_H
  1107. DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
  1108. #else
  1109. DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
  1110. #endif
  1111. fp->tpa_queue_used);
  1112. #endif
  1113. }
  1114. static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  1115. struct sk_buff *skb,
  1116. struct eth_fast_path_rx_cqe *fp_cqe,
  1117. u16 cqe_idx)
  1118. {
  1119. struct sw_rx_page *rx_pg, old_rx_pg;
  1120. u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
  1121. u32 i, frag_len, frag_size, pages;
  1122. int err;
  1123. int j;
  1124. frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
  1125. pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
  1126. /* This is needed in order to enable forwarding support */
  1127. if (frag_size)
  1128. skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
  1129. max(frag_size, (u32)len_on_bd));
  1130. #ifdef BNX2X_STOP_ON_ERROR
  1131. if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
  1132. BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
  1133. pages, cqe_idx);
  1134. BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
  1135. fp_cqe->pkt_len, len_on_bd);
  1136. bnx2x_panic();
  1137. return -EINVAL;
  1138. }
  1139. #endif
  1140. /* Run through the SGL and compose the fragmented skb */
  1141. for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
  1142. u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
  1143. /* FW gives the indices of the SGE as if the ring is an array
  1144. (meaning that "next" element will consume 2 indices) */
  1145. frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
  1146. rx_pg = &fp->rx_page_ring[sge_idx];
  1147. old_rx_pg = *rx_pg;
  1148. /* If we fail to allocate a substitute page, we simply stop
  1149. where we are and drop the whole packet */
  1150. err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
  1151. if (unlikely(err)) {
  1152. fp->eth_q_stats.rx_skb_alloc_failed++;
  1153. return err;
  1154. }
   1155. /* Unmap the page as we are going to pass it to the stack */
  1156. dma_unmap_page(&bp->pdev->dev,
  1157. dma_unmap_addr(&old_rx_pg, mapping),
  1158. SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
  1159. /* Add one frag and update the appropriate fields in the skb */
  1160. skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
  1161. skb->data_len += frag_len;
  1162. skb->truesize += frag_len;
  1163. skb->len += frag_len;
  1164. frag_size -= frag_len;
  1165. }
  1166. return 0;
  1167. }
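/* TPA stop: unmap the aggregated head skb, fix its IP checksum, attach the
 * SGE pages via bnx2x_fill_frag_skb() and hand it to GRO; a newly allocated
 * skb refills the pool, otherwise the packet is dropped and the buffer kept.
 */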
  1168. static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  1169. u16 queue, int pad, int len, union eth_rx_cqe *cqe,
  1170. u16 cqe_idx)
  1171. {
  1172. struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
  1173. struct sk_buff *skb = rx_buf->skb;
  1174. /* alloc new skb */
  1175. struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  1176. /* Unmap skb in the pool anyway, as we are going to change
  1177. pool entry status to BNX2X_TPA_STOP even if new skb allocation
  1178. fails. */
  1179. dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
  1180. bp->rx_buf_size, DMA_FROM_DEVICE);
  1181. if (likely(new_skb)) {
  1182. /* fix ip xsum and give it to the stack */
  1183. /* (no need to map the new skb) */
  1184. #ifdef BCM_VLAN
  1185. int is_vlan_cqe =
  1186. (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
  1187. PARSING_FLAGS_VLAN);
  1188. int is_not_hwaccel_vlan_cqe =
  1189. (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
  1190. #endif
  1191. prefetch(skb);
  1192. prefetch(((char *)(skb)) + 128);
  1193. #ifdef BNX2X_STOP_ON_ERROR
  1194. if (pad + len > bp->rx_buf_size) {
  1195. BNX2X_ERR("skb_put is about to fail... "
  1196. "pad %d len %d rx_buf_size %d\n",
  1197. pad, len, bp->rx_buf_size);
  1198. bnx2x_panic();
  1199. return;
  1200. }
  1201. #endif
  1202. skb_reserve(skb, pad);
  1203. skb_put(skb, len);
  1204. skb->protocol = eth_type_trans(skb, bp->dev);
  1205. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1206. {
  1207. struct iphdr *iph;
  1208. iph = (struct iphdr *)skb->data;
  1209. #ifdef BCM_VLAN
  1210. /* If there is no Rx VLAN offloading -
   1211. take the VLAN tag into account */
  1212. if (unlikely(is_not_hwaccel_vlan_cqe))
  1213. iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
  1214. #endif
  1215. iph->check = 0;
  1216. iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
  1217. }
  1218. if (!bnx2x_fill_frag_skb(bp, fp, skb,
  1219. &cqe->fast_path_cqe, cqe_idx)) {
  1220. #ifdef BCM_VLAN
  1221. if ((bp->vlgrp != NULL) && is_vlan_cqe &&
  1222. (!is_not_hwaccel_vlan_cqe))
  1223. vlan_gro_receive(&fp->napi, bp->vlgrp,
  1224. le16_to_cpu(cqe->fast_path_cqe.
  1225. vlan_tag), skb);
  1226. else
  1227. #endif
  1228. napi_gro_receive(&fp->napi, skb);
  1229. } else {
  1230. DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
  1231. " - dropping packet!\n");
  1232. dev_kfree_skb(skb);
  1233. }
  1234. /* put new skb in bin */
  1235. fp->tpa_pool[queue].skb = new_skb;
  1236. } else {
  1237. /* else drop the packet and keep the buffer in the bin */
  1238. DP(NETIF_MSG_RX_STATUS,
  1239. "Failed to allocate new skb - dropping packet!\n");
  1240. fp->eth_q_stats.rx_skb_alloc_failed++;
  1241. }
  1242. fp->tpa_state[queue] = BNX2X_TPA_STOP;
  1243. }
  1244. static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
  1245. struct bnx2x_fastpath *fp,
  1246. u16 bd_prod, u16 rx_comp_prod,
  1247. u16 rx_sge_prod)
  1248. {
  1249. struct ustorm_eth_rx_producers rx_prods = {0};
  1250. int i;
  1251. /* Update producers */
  1252. rx_prods.bd_prod = bd_prod;
  1253. rx_prods.cqe_prod = rx_comp_prod;
  1254. rx_prods.sge_prod = rx_sge_prod;
  1255. /*
  1256. * Make sure that the BD and SGE data is updated before updating the
  1257. * producers since FW might read the BD/SGE right after the producer
  1258. * is updated.
  1259. * This is only applicable for weak-ordered memory model archs such
   1260. * as IA-64. The following barrier is also mandatory since the FW
  1261. * assumes BDs must have buffers.
  1262. */
  1263. wmb();
  1264. for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
  1265. REG_WR(bp, BAR_USTRORM_INTMEM +
  1266. USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
  1267. ((u32 *)&rx_prods)[i]);
  1268. mmiowb(); /* keep prod updates ordered */
  1269. DP(NETIF_MSG_RX_STATUS,
  1270. "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
  1271. fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
  1272. }
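/* Main Rx completion loop (NAPI poll body): walk the RCQ for up to "budget"
 * packets, dispatching slowpath CQEs to bnx2x_sp_event(), handling TPA
 * start/stop, copying small packets when the MTU exceeds 1500, and finally
 * publishing the new BD/CQE/SGE producers to the chip.
 */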
  1273. static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
  1274. {
  1275. struct bnx2x *bp = fp->bp;
  1276. u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
  1277. u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
  1278. int rx_pkt = 0;
  1279. #ifdef BNX2X_STOP_ON_ERROR
  1280. if (unlikely(bp->panic))
  1281. return 0;
  1282. #endif
  1283. /* CQ "next element" is of the size of the regular element,
  1284. that's why it's ok here */
  1285. hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
  1286. if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
  1287. hw_comp_cons++;
  1288. bd_cons = fp->rx_bd_cons;
  1289. bd_prod = fp->rx_bd_prod;
  1290. bd_prod_fw = bd_prod;
  1291. sw_comp_cons = fp->rx_comp_cons;
  1292. sw_comp_prod = fp->rx_comp_prod;
  1293. /* Memory barrier necessary as speculative reads of the rx
  1294. * buffer can be ahead of the index in the status block
  1295. */
  1296. rmb();
  1297. DP(NETIF_MSG_RX_STATUS,
  1298. "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
  1299. fp->index, hw_comp_cons, sw_comp_cons);
  1300. while (sw_comp_cons != hw_comp_cons) {
  1301. struct sw_rx_bd *rx_buf = NULL;
  1302. struct sk_buff *skb;
  1303. union eth_rx_cqe *cqe;
  1304. u8 cqe_fp_flags, cqe_fp_status_flags;
  1305. u16 len, pad;
  1306. comp_ring_cons = RCQ_BD(sw_comp_cons);
  1307. bd_prod = RX_BD(bd_prod);
  1308. bd_cons = RX_BD(bd_cons);
  1309. /* Prefetch the page containing the BD descriptor
  1310. at producer's index. It will be needed when new skb is
  1311. allocated */
  1312. prefetch((void *)(PAGE_ALIGN((unsigned long)
  1313. (&fp->rx_desc_ring[bd_prod])) -
  1314. PAGE_SIZE + 1));
  1315. cqe = &fp->rx_comp_ring[comp_ring_cons];
  1316. cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
  1317. cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
  1318. DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
  1319. " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
  1320. cqe_fp_flags, cqe->fast_path_cqe.status_flags,
  1321. le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
  1322. le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
  1323. le16_to_cpu(cqe->fast_path_cqe.pkt_len));
  1324. /* is this a slowpath msg? */
  1325. if (unlikely(CQE_TYPE(cqe_fp_flags))) {
  1326. bnx2x_sp_event(fp, cqe);
  1327. goto next_cqe;
  1328. /* this is an rx packet */
  1329. } else {
  1330. rx_buf = &fp->rx_buf_ring[bd_cons];
  1331. skb = rx_buf->skb;
  1332. prefetch(skb);
  1333. len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
  1334. pad = cqe->fast_path_cqe.placement_offset;
  1335. /* If CQE is marked both TPA_START and TPA_END
  1336. it is a non-TPA CQE */
  1337. if ((!fp->disable_tpa) &&
  1338. (TPA_TYPE(cqe_fp_flags) !=
  1339. (TPA_TYPE_START | TPA_TYPE_END))) {
  1340. u16 queue = cqe->fast_path_cqe.queue_index;
  1341. if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
  1342. DP(NETIF_MSG_RX_STATUS,
  1343. "calling tpa_start on queue %d\n",
  1344. queue);
  1345. bnx2x_tpa_start(fp, queue, skb,
  1346. bd_cons, bd_prod);
  1347. goto next_rx;
  1348. }
  1349. if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
  1350. DP(NETIF_MSG_RX_STATUS,
  1351. "calling tpa_stop on queue %d\n",
  1352. queue);
  1353. if (!BNX2X_RX_SUM_FIX(cqe))
   1354. BNX2X_ERR("STOP on non-TCP "
  1355. "data\n");
   1356. /* This is the size of the linear data
  1357. on this skb */
  1358. len = le16_to_cpu(cqe->fast_path_cqe.
  1359. len_on_bd);
  1360. bnx2x_tpa_stop(bp, fp, queue, pad,
  1361. len, cqe, comp_ring_cons);
  1362. #ifdef BNX2X_STOP_ON_ERROR
  1363. if (bp->panic)
  1364. return 0;
  1365. #endif
  1366. bnx2x_update_sge_prod(fp,
  1367. &cqe->fast_path_cqe);
  1368. goto next_cqe;
  1369. }
  1370. }
  1371. dma_sync_single_for_device(&bp->pdev->dev,
  1372. dma_unmap_addr(rx_buf, mapping),
  1373. pad + RX_COPY_THRESH,
  1374. DMA_FROM_DEVICE);
  1375. prefetch(((char *)(skb)) + 128);
  1376. /* is this an error packet? */
  1377. if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
  1378. DP(NETIF_MSG_RX_ERR,
  1379. "ERROR flags %x rx packet %u\n",
  1380. cqe_fp_flags, sw_comp_cons);
  1381. fp->eth_q_stats.rx_err_discard_pkt++;
  1382. goto reuse_rx;
  1383. }
  1384. /* Since we don't have a jumbo ring
  1385. * copy small packets if mtu > 1500
  1386. */
  1387. if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
  1388. (len <= RX_COPY_THRESH)) {
  1389. struct sk_buff *new_skb;
  1390. new_skb = netdev_alloc_skb(bp->dev,
  1391. len + pad);
  1392. if (new_skb == NULL) {
  1393. DP(NETIF_MSG_RX_ERR,
  1394. "ERROR packet dropped "
  1395. "because of alloc failure\n");
  1396. fp->eth_q_stats.rx_skb_alloc_failed++;
  1397. goto reuse_rx;
  1398. }
  1399. /* aligned copy */
  1400. skb_copy_from_linear_data_offset(skb, pad,
  1401. new_skb->data + pad, len);
  1402. skb_reserve(new_skb, pad);
  1403. skb_put(new_skb, len);
  1404. bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
  1405. skb = new_skb;
  1406. } else
  1407. if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
  1408. dma_unmap_single(&bp->pdev->dev,
  1409. dma_unmap_addr(rx_buf, mapping),
  1410. bp->rx_buf_size,
  1411. DMA_FROM_DEVICE);
  1412. skb_reserve(skb, pad);
  1413. skb_put(skb, len);
  1414. } else {
  1415. DP(NETIF_MSG_RX_ERR,
  1416. "ERROR packet dropped because "
  1417. "of alloc failure\n");
  1418. fp->eth_q_stats.rx_skb_alloc_failed++;
  1419. reuse_rx:
  1420. bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
  1421. goto next_rx;
  1422. }
  1423. skb->protocol = eth_type_trans(skb, bp->dev);
  1424. if ((bp->dev->features & NETIF_F_RXHASH) &&
  1425. (cqe_fp_status_flags &
  1426. ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
  1427. skb->rxhash = le32_to_cpu(
  1428. cqe->fast_path_cqe.rss_hash_result);
  1429. skb->ip_summed = CHECKSUM_NONE;
  1430. if (bp->rx_csum) {
  1431. if (likely(BNX2X_RX_CSUM_OK(cqe)))
  1432. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1433. else
  1434. fp->eth_q_stats.hw_csum_err++;
  1435. }
  1436. }
  1437. skb_record_rx_queue(skb, fp->index);
  1438. #ifdef BCM_VLAN
  1439. if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
  1440. (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
  1441. PARSING_FLAGS_VLAN))
  1442. vlan_gro_receive(&fp->napi, bp->vlgrp,
  1443. le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
  1444. else
  1445. #endif
  1446. napi_gro_receive(&fp->napi, skb);
  1447. next_rx:
  1448. rx_buf->skb = NULL;
  1449. bd_cons = NEXT_RX_IDX(bd_cons);
  1450. bd_prod = NEXT_RX_IDX(bd_prod);
  1451. bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
  1452. rx_pkt++;
  1453. next_cqe:
  1454. sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
  1455. sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
  1456. if (rx_pkt == budget)
  1457. break;
  1458. } /* while */
  1459. fp->rx_bd_cons = bd_cons;
  1460. fp->rx_bd_prod = bd_prod_fw;
  1461. fp->rx_comp_cons = sw_comp_cons;
  1462. fp->rx_comp_prod = sw_comp_prod;
  1463. /* Update producers */
  1464. bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
  1465. fp->rx_sge_prod);
  1466. fp->rx_pkt += rx_pkt;
  1467. fp->rx_calls++;
  1468. return rx_pkt;
  1469. }
  1470. static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
  1471. {
  1472. struct bnx2x_fastpath *fp = fp_cookie;
  1473. struct bnx2x *bp = fp->bp;
  1474. /* Return here if interrupt is disabled */
  1475. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  1476. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  1477. return IRQ_HANDLED;
  1478. }
  1479. DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
  1480. fp->index, fp->sb_id);
  1481. bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
  1482. #ifdef BNX2X_STOP_ON_ERROR
  1483. if (unlikely(bp->panic))
  1484. return IRQ_HANDLED;
  1485. #endif
  1486. /* Handle Rx and Tx according to MSI-X vector */
  1487. prefetch(fp->rx_cons_sb);
  1488. prefetch(fp->tx_cons_sb);
  1489. prefetch(&fp->status_blk->u_status_block.status_block_index);
  1490. prefetch(&fp->status_blk->c_status_block.status_block_index);
  1491. napi_schedule(&bnx2x_fp(bp, fp->index, napi));
  1492. return IRQ_HANDLED;
  1493. }
  1494. static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
  1495. {
  1496. struct bnx2x *bp = netdev_priv(dev_instance);
  1497. u16 status = bnx2x_ack_int(bp);
  1498. u16 mask;
  1499. int i;
  1500. /* Return here if interrupt is shared and it's not for us */
  1501. if (unlikely(status == 0)) {
  1502. DP(NETIF_MSG_INTR, "not our interrupt!\n");
  1503. return IRQ_NONE;
  1504. }
  1505. DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
  1506. /* Return here if interrupt is disabled */
  1507. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  1508. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  1509. return IRQ_HANDLED;
  1510. }
  1511. #ifdef BNX2X_STOP_ON_ERROR
  1512. if (unlikely(bp->panic))
  1513. return IRQ_HANDLED;
  1514. #endif
  1515. for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
  1516. struct bnx2x_fastpath *fp = &bp->fp[i];
  1517. mask = 0x2 << fp->sb_id;
  1518. if (status & mask) {
  1519. /* Handle Rx and Tx according to SB id */
  1520. prefetch(fp->rx_cons_sb);
  1521. prefetch(&fp->status_blk->u_status_block.
  1522. status_block_index);
  1523. prefetch(fp->tx_cons_sb);
  1524. prefetch(&fp->status_blk->c_status_block.
  1525. status_block_index);
  1526. napi_schedule(&bnx2x_fp(bp, fp->index, napi));
  1527. status &= ~mask;
  1528. }
  1529. }
  1530. #ifdef BCM_CNIC
  1531. mask = 0x2 << CNIC_SB_ID(bp);
  1532. if (status & (mask | 0x1)) {
  1533. struct cnic_ops *c_ops = NULL;
  1534. rcu_read_lock();
  1535. c_ops = rcu_dereference(bp->cnic_ops);
  1536. if (c_ops)
  1537. c_ops->cnic_handler(bp->cnic_data, NULL);
  1538. rcu_read_unlock();
  1539. status &= ~mask;
  1540. }
  1541. #endif
  1542. if (unlikely(status & 0x1)) {
  1543. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  1544. status &= ~0x1;
  1545. if (!status)
  1546. return IRQ_HANDLED;
  1547. }
  1548. if (unlikely(status))
  1549. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  1550. status);
  1551. return IRQ_HANDLED;
  1552. }
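/*
 * Illustrative sketch (not part of the driver): how the INTx status word
 * handled above is laid out, as far as this function uses it.  Bit 0
 * signals a slowpath event and bit (sb_id + 1), i.e. (0x2 << sb_id),
 * signals fastpath status block sb_id.  The helper names below are
 * hypothetical.
 */
static inline bool example_status_has_fp_sb(u16 status, u8 sb_id)
{
	return (status & (0x2 << sb_id)) != 0;	/* fastpath SB pending? */
}

static inline bool example_status_has_slowpath(u16 status)
{
	return (status & 0x1) != 0;		/* slowpath event pending? */
}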
  1553. /* end of fast path */
  1554. static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  1555. /* Link */
  1556. /*
  1557. * General service functions
  1558. */
  1559. static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
  1560. {
  1561. u32 lock_status;
  1562. u32 resource_bit = (1 << resource);
  1563. int func = BP_FUNC(bp);
  1564. u32 hw_lock_control_reg;
  1565. int cnt;
  1566. /* Validating that the resource is within range */
  1567. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1568. DP(NETIF_MSG_HW,
  1569. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1570. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1571. return -EINVAL;
  1572. }
  1573. if (func <= 5) {
  1574. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1575. } else {
  1576. hw_lock_control_reg =
  1577. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1578. }
  1579. /* Validating that the resource is not already taken */
  1580. lock_status = REG_RD(bp, hw_lock_control_reg);
  1581. if (lock_status & resource_bit) {
  1582. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1583. lock_status, resource_bit);
  1584. return -EEXIST;
  1585. }
1586. /* Try for 5 seconds, polling every 5 ms */
  1587. for (cnt = 0; cnt < 1000; cnt++) {
  1588. /* Try to acquire the lock */
  1589. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  1590. lock_status = REG_RD(bp, hw_lock_control_reg);
  1591. if (lock_status & resource_bit)
  1592. return 0;
  1593. msleep(5);
  1594. }
  1595. DP(NETIF_MSG_HW, "Timeout\n");
  1596. return -EAGAIN;
  1597. }
  1598. static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
  1599. {
  1600. u32 lock_status;
  1601. u32 resource_bit = (1 << resource);
  1602. int func = BP_FUNC(bp);
  1603. u32 hw_lock_control_reg;
  1604. DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
  1605. /* Validating that the resource is within range */
  1606. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1607. DP(NETIF_MSG_HW,
  1608. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1609. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1610. return -EINVAL;
  1611. }
  1612. if (func <= 5) {
  1613. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1614. } else {
  1615. hw_lock_control_reg =
  1616. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1617. }
  1618. /* Validating that the resource is currently taken */
  1619. lock_status = REG_RD(bp, hw_lock_control_reg);
  1620. if (!(lock_status & resource_bit)) {
  1621. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1622. lock_status, resource_bit);
  1623. return -EFAULT;
  1624. }
  1625. REG_WR(bp, hw_lock_control_reg, resource_bit);
  1626. return 0;
  1627. }
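/*
 * Usage sketch (illustrative only): the two helpers above are meant to be
 * paired around accesses to a shared hardware resource, exactly as the PHY
 * lock wrappers below do for HW_LOCK_RESOURCE_MDIO.  The wrapper here is
 * hypothetical.
 */
static int example_with_hw_lock(struct bnx2x *bp, u32 resource,
				void (*op)(struct bnx2x *bp))
{
	int rc = bnx2x_acquire_hw_lock(bp, resource);
	if (rc)
		return rc;		/* -EINVAL, -EEXIST or -EAGAIN */
	op(bp);				/* touch the shared resource */
	return bnx2x_release_hw_lock(bp, resource);
}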
  1628. /* HW Lock for shared dual port PHYs */
  1629. static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
  1630. {
  1631. mutex_lock(&bp->port.phy_mutex);
  1632. if (bp->port.need_hw_lock)
  1633. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
  1634. }
  1635. static void bnx2x_release_phy_lock(struct bnx2x *bp)
  1636. {
  1637. if (bp->port.need_hw_lock)
  1638. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
  1639. mutex_unlock(&bp->port.phy_mutex);
  1640. }
  1641. int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
  1642. {
  1643. /* The GPIO should be swapped if swap register is set and active */
  1644. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1645. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1646. int gpio_shift = gpio_num +
  1647. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1648. u32 gpio_mask = (1 << gpio_shift);
  1649. u32 gpio_reg;
  1650. int value;
  1651. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1652. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1653. return -EINVAL;
  1654. }
  1655. /* read GPIO value */
  1656. gpio_reg = REG_RD(bp, MISC_REG_GPIO);
  1657. /* get the requested pin value */
  1658. if ((gpio_reg & gpio_mask) == gpio_mask)
  1659. value = 1;
  1660. else
  1661. value = 0;
  1662. DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
  1663. return value;
  1664. }
  1665. int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1666. {
  1667. /* The GPIO should be swapped if swap register is set and active */
  1668. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1669. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1670. int gpio_shift = gpio_num +
  1671. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1672. u32 gpio_mask = (1 << gpio_shift);
  1673. u32 gpio_reg;
  1674. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1675. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1676. return -EINVAL;
  1677. }
  1678. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1679. /* read GPIO and mask except the float bits */
  1680. gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
  1681. switch (mode) {
  1682. case MISC_REGISTERS_GPIO_OUTPUT_LOW:
  1683. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
  1684. gpio_num, gpio_shift);
  1685. /* clear FLOAT and set CLR */
  1686. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1687. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
  1688. break;
  1689. case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
  1690. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
  1691. gpio_num, gpio_shift);
  1692. /* clear FLOAT and set SET */
  1693. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1694. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
  1695. break;
  1696. case MISC_REGISTERS_GPIO_INPUT_HI_Z:
  1697. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
  1698. gpio_num, gpio_shift);
  1699. /* set FLOAT */
  1700. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1701. break;
  1702. default:
  1703. break;
  1704. }
  1705. REG_WR(bp, MISC_REG_GPIO, gpio_reg);
  1706. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1707. return 0;
  1708. }
  1709. int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1710. {
  1711. /* The GPIO should be swapped if swap register is set and active */
  1712. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1713. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1714. int gpio_shift = gpio_num +
  1715. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1716. u32 gpio_mask = (1 << gpio_shift);
  1717. u32 gpio_reg;
  1718. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1719. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1720. return -EINVAL;
  1721. }
  1722. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1723. /* read GPIO int */
  1724. gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
  1725. switch (mode) {
  1726. case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
  1727. DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
  1728. "output low\n", gpio_num, gpio_shift);
  1729. /* clear SET and set CLR */
  1730. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1731. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1732. break;
  1733. case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
  1734. DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
  1735. "output high\n", gpio_num, gpio_shift);
  1736. /* clear CLR and set SET */
  1737. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1738. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1739. break;
  1740. default:
  1741. break;
  1742. }
  1743. REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
  1744. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1745. return 0;
  1746. }
  1747. static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
  1748. {
  1749. u32 spio_mask = (1 << spio_num);
  1750. u32 spio_reg;
  1751. if ((spio_num < MISC_REGISTERS_SPIO_4) ||
  1752. (spio_num > MISC_REGISTERS_SPIO_7)) {
  1753. BNX2X_ERR("Invalid SPIO %d\n", spio_num);
  1754. return -EINVAL;
  1755. }
  1756. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1757. /* read SPIO and mask except the float bits */
  1758. spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
  1759. switch (mode) {
  1760. case MISC_REGISTERS_SPIO_OUTPUT_LOW:
  1761. DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
  1762. /* clear FLOAT and set CLR */
  1763. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1764. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
  1765. break;
  1766. case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
  1767. DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
  1768. /* clear FLOAT and set SET */
  1769. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1770. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
  1771. break;
  1772. case MISC_REGISTERS_SPIO_INPUT_HI_Z:
  1773. DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
  1774. /* set FLOAT */
  1775. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1776. break;
  1777. default:
  1778. break;
  1779. }
  1780. REG_WR(bp, MISC_REG_SPIO, spio_reg);
  1781. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1782. return 0;
  1783. }
  1784. static void bnx2x_calc_fc_adv(struct bnx2x *bp)
  1785. {
  1786. switch (bp->link_vars.ieee_fc &
  1787. MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
  1788. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
  1789. bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
  1790. ADVERTISED_Pause);
  1791. break;
  1792. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
  1793. bp->port.advertising |= (ADVERTISED_Asym_Pause |
  1794. ADVERTISED_Pause);
  1795. break;
  1796. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
  1797. bp->port.advertising |= ADVERTISED_Asym_Pause;
  1798. break;
  1799. default:
  1800. bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
  1801. ADVERTISED_Pause);
  1802. break;
  1803. }
  1804. }
  1805. static void bnx2x_link_report(struct bnx2x *bp)
  1806. {
  1807. if (bp->flags & MF_FUNC_DIS) {
  1808. netif_carrier_off(bp->dev);
  1809. netdev_err(bp->dev, "NIC Link is Down\n");
  1810. return;
  1811. }
  1812. if (bp->link_vars.link_up) {
  1813. u16 line_speed;
  1814. if (bp->state == BNX2X_STATE_OPEN)
  1815. netif_carrier_on(bp->dev);
  1816. netdev_info(bp->dev, "NIC Link is Up, ");
  1817. line_speed = bp->link_vars.line_speed;
  1818. if (IS_E1HMF(bp)) {
  1819. u16 vn_max_rate;
  1820. vn_max_rate =
  1821. ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
  1822. FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
  1823. if (vn_max_rate < line_speed)
  1824. line_speed = vn_max_rate;
  1825. }
  1826. pr_cont("%d Mbps ", line_speed);
  1827. if (bp->link_vars.duplex == DUPLEX_FULL)
  1828. pr_cont("full duplex");
  1829. else
  1830. pr_cont("half duplex");
  1831. if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
  1832. if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
  1833. pr_cont(", receive ");
  1834. if (bp->link_vars.flow_ctrl &
  1835. BNX2X_FLOW_CTRL_TX)
  1836. pr_cont("& transmit ");
  1837. } else {
  1838. pr_cont(", transmit ");
  1839. }
  1840. pr_cont("flow control ON");
  1841. }
  1842. pr_cont("\n");
  1843. } else { /* link_down */
  1844. netif_carrier_off(bp->dev);
  1845. netdev_err(bp->dev, "NIC Link is Down\n");
  1846. }
  1847. }
  1848. static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
  1849. {
  1850. if (!BP_NOMCP(bp)) {
  1851. u8 rc;
  1852. /* Initialize link parameters structure variables */
  1853. /* It is recommended to turn off RX FC for jumbo frames
  1854. for better performance */
  1855. if (bp->dev->mtu > 5000)
  1856. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
  1857. else
  1858. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
  1859. bnx2x_acquire_phy_lock(bp);
  1860. if (load_mode == LOAD_DIAG)
  1861. bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
  1862. rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1863. bnx2x_release_phy_lock(bp);
  1864. bnx2x_calc_fc_adv(bp);
  1865. if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
  1866. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1867. bnx2x_link_report(bp);
  1868. }
  1869. return rc;
  1870. }
  1871. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  1872. return -EINVAL;
  1873. }
  1874. static void bnx2x_link_set(struct bnx2x *bp)
  1875. {
  1876. if (!BP_NOMCP(bp)) {
  1877. bnx2x_acquire_phy_lock(bp);
  1878. bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1879. bnx2x_release_phy_lock(bp);
  1880. bnx2x_calc_fc_adv(bp);
  1881. } else
  1882. BNX2X_ERR("Bootcode is missing - can not set link\n");
  1883. }
  1884. static void bnx2x__link_reset(struct bnx2x *bp)
  1885. {
  1886. if (!BP_NOMCP(bp)) {
  1887. bnx2x_acquire_phy_lock(bp);
  1888. bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
  1889. bnx2x_release_phy_lock(bp);
  1890. } else
  1891. BNX2X_ERR("Bootcode is missing - can not reset link\n");
  1892. }
  1893. static u8 bnx2x_link_test(struct bnx2x *bp)
  1894. {
  1895. u8 rc = 0;
  1896. if (!BP_NOMCP(bp)) {
  1897. bnx2x_acquire_phy_lock(bp);
  1898. rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
  1899. bnx2x_release_phy_lock(bp);
  1900. } else
  1901. BNX2X_ERR("Bootcode is missing - can not test link\n");
  1902. return rc;
  1903. }
  1904. static void bnx2x_init_port_minmax(struct bnx2x *bp)
  1905. {
  1906. u32 r_param = bp->link_vars.line_speed / 8;
  1907. u32 fair_periodic_timeout_usec;
  1908. u32 t_fair;
  1909. memset(&(bp->cmng.rs_vars), 0,
  1910. sizeof(struct rate_shaping_vars_per_port));
  1911. memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
  1912. /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
  1913. bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1914. /* this is the threshold below which no timer arming will occur;
1915. the 1.25 coefficient makes the threshold a little bigger than
1916. the real time, to compensate for timer inaccuracy */
  1917. bp->cmng.rs_vars.rs_threshold =
  1918. (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
  1919. /* resolution of fairness timer */
  1920. fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
  1921. /* for 10G it is 1000usec. for 1G it is 10000usec. */
  1922. t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
  1923. /* this is the threshold below which we won't arm the timer anymore */
  1924. bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1925. /* we multiply by 1e3/8 to get bytes/msec.
1926. We don't want the credits to exceed
1927. t_fair*FAIR_MEM (the algorithm resolution) */
  1928. bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
  1929. /* since each tick is 4 usec */
  1930. bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
  1931. }
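/*
 * Worked example (illustrative; assumes RS_PERIODIC_TIMEOUT_USEC is the
 * 100 usec mentioned in the comment above and that T_FAIR_COEF is chosen
 * so that t_fair comes out to 1000 usec at 10G and 10000 usec at 1G,
 * i.e. 10^7):
 *
 *   line_speed = 10000 Mbps  =>  r_param = 10000 / 8 = 1250
 *   rs_threshold            = (100 * 1250 * 5) / 4 = 156250
 *   fair_periodic_timeout   = QM_ARB_BYTES / 1250 usec
 *   t_fair                  = 10^7 / 10000 = 1000 usec
 */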
  1932. /* Calculates the sum of vn_min_rates.
  1933. It's needed for further normalizing of the min_rates.
  1934. Returns:
  1935. sum of vn_min_rates.
  1936. or
  1937. 0 - if all the min_rates are 0.
1938. In the latter case the fairness algorithm should be deactivated.
  1939. If not all min_rates are zero then those that are zeroes will be set to 1.
  1940. */
  1941. static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
  1942. {
  1943. int all_zero = 1;
  1944. int port = BP_PORT(bp);
  1945. int vn;
  1946. bp->vn_weight_sum = 0;
  1947. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1948. int func = 2*vn + port;
  1949. u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
  1950. u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1951. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1952. /* Skip hidden vns */
  1953. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
  1954. continue;
  1955. /* If min rate is zero - set it to 1 */
  1956. if (!vn_min_rate)
  1957. vn_min_rate = DEF_MIN_RATE;
  1958. else
  1959. all_zero = 0;
  1960. bp->vn_weight_sum += vn_min_rate;
  1961. }
  1962. /* ... only if all min rates are zeros - disable fairness */
  1963. if (all_zero) {
  1964. bp->cmng.flags.cmng_enables &=
  1965. ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1966. DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
  1967. " fairness will be disabled\n");
  1968. } else
  1969. bp->cmng.flags.cmng_enables |=
  1970. CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1971. }
  1972. static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
  1973. {
  1974. struct rate_shaping_vars_per_vn m_rs_vn;
  1975. struct fairness_vars_per_vn m_fair_vn;
  1976. u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
  1977. u16 vn_min_rate, vn_max_rate;
  1978. int i;
  1979. /* If function is hidden - set min and max to zeroes */
  1980. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
  1981. vn_min_rate = 0;
  1982. vn_max_rate = 0;
  1983. } else {
  1984. vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1985. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1986. /* If min rate is zero - set it to 1 */
  1987. if (!vn_min_rate)
  1988. vn_min_rate = DEF_MIN_RATE;
  1989. vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
  1990. FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
  1991. }
  1992. DP(NETIF_MSG_IFUP,
  1993. "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
  1994. func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
  1995. memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
  1996. memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
  1997. /* global vn counter - maximal Mbps for this vn */
  1998. m_rs_vn.vn_counter.rate = vn_max_rate;
  1999. /* quota - number of bytes transmitted in this period */
  2000. m_rs_vn.vn_counter.quota =
  2001. (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
  2002. if (bp->vn_weight_sum) {
2003. /* credit for each period of the fairness algorithm:
2004. number of bytes in T_FAIR (the vns share the port rate).
2005. vn_weight_sum should not be larger than 10000, thus
2006. T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2007. than zero */
  2008. m_fair_vn.vn_credit_delta =
  2009. max_t(u32, (vn_min_rate * (T_FAIR_COEF /
  2010. (8 * bp->vn_weight_sum))),
  2011. (bp->cmng.fair_vars.fair_threshold * 2));
  2012. DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
  2013. m_fair_vn.vn_credit_delta);
  2014. }
  2015. /* Store it to internal memory */
  2016. for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
  2017. REG_WR(bp, BAR_XSTRORM_INTMEM +
  2018. XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
  2019. ((u32 *)(&m_rs_vn))[i]);
  2020. for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
  2021. REG_WR(bp, BAR_XSTRORM_INTMEM +
  2022. XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
  2023. ((u32 *)(&m_fair_vn))[i]);
  2024. }
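/*
 * Worked example (illustrative; the MF config value is hypothetical and
 * RS_PERIODIC_TIMEOUT_USEC is assumed to be 100 usec as above):
 *
 *   FUNC_MF_CFG_MAX_BW field = 25  =>  vn_max_rate = 25 * 100 = 2500 Mbps
 *   vn_counter.quota = (2500 * 100) / 8 = 31250 bytes per period
 *
 * i.e. at most 31250 bytes may be sent by this vn in each 100 usec rate
 * shaping period, which works out to the configured 2.5 Gbps.
 */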
  2025. /* This function is called upon link interrupt */
  2026. static void bnx2x_link_attn(struct bnx2x *bp)
  2027. {
  2028. u32 prev_link_status = bp->link_vars.link_status;
  2029. /* Make sure that we are synced with the current statistics */
  2030. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  2031. bnx2x_link_update(&bp->link_params, &bp->link_vars);
  2032. if (bp->link_vars.link_up) {
  2033. /* dropless flow control */
  2034. if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
  2035. int port = BP_PORT(bp);
  2036. u32 pause_enabled = 0;
  2037. if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
  2038. pause_enabled = 1;
  2039. REG_WR(bp, BAR_USTRORM_INTMEM +
  2040. USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
  2041. pause_enabled);
  2042. }
  2043. if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
  2044. struct host_port_stats *pstats;
  2045. pstats = bnx2x_sp(bp, port_stats);
  2046. /* reset old bmac stats */
  2047. memset(&(pstats->mac_stx[0]), 0,
  2048. sizeof(struct mac_stx));
  2049. }
  2050. if (bp->state == BNX2X_STATE_OPEN)
  2051. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  2052. }
  2053. /* indicate link status only if link status actually changed */
  2054. if (prev_link_status != bp->link_vars.link_status)
  2055. bnx2x_link_report(bp);
  2056. if (IS_E1HMF(bp)) {
  2057. int port = BP_PORT(bp);
  2058. int func;
  2059. int vn;
  2060. /* Set the attention towards other drivers on the same port */
  2061. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  2062. if (vn == BP_E1HVN(bp))
  2063. continue;
  2064. func = ((vn << 1) | port);
  2065. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
  2066. (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
  2067. }
  2068. if (bp->link_vars.link_up) {
  2069. int i;
  2070. /* Init rate shaping and fairness contexts */
  2071. bnx2x_init_port_minmax(bp);
  2072. for (vn = VN_0; vn < E1HVN_MAX; vn++)
  2073. bnx2x_init_vn_minmax(bp, 2*vn + port);
  2074. /* Store it to internal memory */
  2075. for (i = 0;
  2076. i < sizeof(struct cmng_struct_per_port) / 4; i++)
  2077. REG_WR(bp, BAR_XSTRORM_INTMEM +
  2078. XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
  2079. ((u32 *)(&bp->cmng))[i]);
  2080. }
  2081. }
  2082. }
  2083. static void bnx2x__link_status_update(struct bnx2x *bp)
  2084. {
  2085. if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
  2086. return;
  2087. bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
  2088. if (bp->link_vars.link_up)
  2089. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  2090. else
  2091. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  2092. bnx2x_calc_vn_weight_sum(bp);
  2093. /* indicate link status */
  2094. bnx2x_link_report(bp);
  2095. }
  2096. static void bnx2x_pmf_update(struct bnx2x *bp)
  2097. {
  2098. int port = BP_PORT(bp);
  2099. u32 val;
  2100. bp->port.pmf = 1;
  2101. DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
  2102. /* enable nig attention */
  2103. val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
  2104. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  2105. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  2106. bnx2x_stats_handle(bp, STATS_EVENT_PMF);
  2107. }
  2108. /* end of Link */
  2109. /* slow path */
  2110. /*
  2111. * General service functions
  2112. */
  2113. /* send the MCP a request, block until there is a reply */
  2114. u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
  2115. {
  2116. int func = BP_FUNC(bp);
  2117. u32 seq = ++bp->fw_seq;
  2118. u32 rc = 0;
  2119. u32 cnt = 1;
  2120. u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
  2121. mutex_lock(&bp->fw_mb_mutex);
  2122. SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
  2123. DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
  2124. do {
2125. /* let the FW do its magic ... */
  2126. msleep(delay);
  2127. rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2128. /* Give the FW up to 5 seconds (500*10ms) */
  2129. } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
  2130. DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
  2131. cnt*delay, rc, seq);
  2132. /* is this a reply to our command? */
  2133. if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
  2134. rc &= FW_MSG_CODE_MASK;
  2135. else {
  2136. /* FW BUG! */
  2137. BNX2X_ERR("FW failed to respond!\n");
  2138. bnx2x_fw_dump(bp);
  2139. rc = 0;
  2140. }
  2141. mutex_unlock(&bp->fw_mb_mutex);
  2142. return rc;
  2143. }
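/*
 * Usage sketch (illustrative): bnx2x_fw_command() returns the firmware
 * response masked with FW_MSG_CODE_MASK, or 0 if the MCP never echoed our
 * sequence number.  A caller such as the DCC handler below might check it
 * like this (hypothetical snippet):
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!rc)
 *		BNX2X_ERR("MCP did not respond to the DCC ack\n");
 */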
  2144. static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
  2145. static void bnx2x_set_rx_mode(struct net_device *dev);
  2146. static void bnx2x_e1h_disable(struct bnx2x *bp)
  2147. {
  2148. int port = BP_PORT(bp);
  2149. netif_tx_disable(bp->dev);
  2150. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  2151. netif_carrier_off(bp->dev);
  2152. }
  2153. static void bnx2x_e1h_enable(struct bnx2x *bp)
  2154. {
  2155. int port = BP_PORT(bp);
  2156. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2157. /* Only the Tx queues should be re-enabled */
  2158. netif_tx_wake_all_queues(bp->dev);
  2159. /*
  2160. * Should not call netif_carrier_on since it will be called if the link
  2161. * is up when checking for link state
  2162. */
  2163. }
  2164. static void bnx2x_update_min_max(struct bnx2x *bp)
  2165. {
  2166. int port = BP_PORT(bp);
  2167. int vn, i;
  2168. /* Init rate shaping and fairness contexts */
  2169. bnx2x_init_port_minmax(bp);
  2170. bnx2x_calc_vn_weight_sum(bp);
  2171. for (vn = VN_0; vn < E1HVN_MAX; vn++)
  2172. bnx2x_init_vn_minmax(bp, 2*vn + port);
  2173. if (bp->port.pmf) {
  2174. int func;
  2175. /* Set the attention towards other drivers on the same port */
  2176. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  2177. if (vn == BP_E1HVN(bp))
  2178. continue;
  2179. func = ((vn << 1) | port);
  2180. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
  2181. (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
  2182. }
  2183. /* Store it to internal memory */
  2184. for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
  2185. REG_WR(bp, BAR_XSTRORM_INTMEM +
  2186. XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
  2187. ((u32 *)(&bp->cmng))[i]);
  2188. }
  2189. }
  2190. static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
  2191. {
  2192. DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
  2193. if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
  2194. /*
  2195. * This is the only place besides the function initialization
2196. * where the bp->flags can change, so it is done without any
  2197. * locks
  2198. */
  2199. if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
  2200. DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
  2201. bp->flags |= MF_FUNC_DIS;
  2202. bnx2x_e1h_disable(bp);
  2203. } else {
  2204. DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
  2205. bp->flags &= ~MF_FUNC_DIS;
  2206. bnx2x_e1h_enable(bp);
  2207. }
  2208. dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
  2209. }
  2210. if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
  2211. bnx2x_update_min_max(bp);
  2212. dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
  2213. }
  2214. /* Report results to MCP */
  2215. if (dcc_event)
  2216. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
  2217. else
  2218. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
  2219. }
  2220. /* must be called under the spq lock */
  2221. static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
  2222. {
  2223. struct eth_spe *next_spe = bp->spq_prod_bd;
  2224. if (bp->spq_prod_bd == bp->spq_last_bd) {
  2225. bp->spq_prod_bd = bp->spq;
  2226. bp->spq_prod_idx = 0;
  2227. DP(NETIF_MSG_TIMER, "end of spq\n");
  2228. } else {
  2229. bp->spq_prod_bd++;
  2230. bp->spq_prod_idx++;
  2231. }
  2232. return next_spe;
  2233. }
  2234. /* must be called under the spq lock */
  2235. static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
  2236. {
  2237. int func = BP_FUNC(bp);
  2238. /* Make sure that BD data is updated before writing the producer */
  2239. wmb();
  2240. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
  2241. bp->spq_prod_idx);
  2242. mmiowb();
  2243. }
  2244. /* the slow path queue is odd since completions arrive on the fastpath ring */
  2245. static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
  2246. u32 data_hi, u32 data_lo, int common)
  2247. {
  2248. struct eth_spe *spe;
  2249. #ifdef BNX2X_STOP_ON_ERROR
  2250. if (unlikely(bp->panic))
  2251. return -EIO;
  2252. #endif
  2253. spin_lock_bh(&bp->spq_lock);
  2254. if (!bp->spq_left) {
  2255. BNX2X_ERR("BUG! SPQ ring full!\n");
  2256. spin_unlock_bh(&bp->spq_lock);
  2257. bnx2x_panic();
  2258. return -EBUSY;
  2259. }
  2260. spe = bnx2x_sp_get_next(bp);
2261. /* CID needs the port number to be encoded in it */
  2262. spe->hdr.conn_and_cmd_data =
  2263. cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
  2264. HW_CID(bp, cid));
  2265. spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
  2266. if (common)
  2267. spe->hdr.type |=
  2268. cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
  2269. spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
  2270. spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
  2271. bp->spq_left--;
  2272. DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
  2273. "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
  2274. bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
  2275. (u32)(U64_LO(bp->spq_mapping) +
  2276. (void *)bp->spq_prod_bd - (void *)bp->spq), command,
  2277. HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
  2278. bnx2x_sp_prod_update(bp);
  2279. spin_unlock_bh(&bp->spq_lock);
  2280. return 0;
  2281. }
  2282. /* acquire split MCP access lock register */
  2283. static int bnx2x_acquire_alr(struct bnx2x *bp)
  2284. {
  2285. u32 j, val;
  2286. int rc = 0;
  2287. might_sleep();
  2288. for (j = 0; j < 1000; j++) {
  2289. val = (1UL << 31);
  2290. REG_WR(bp, GRCBASE_MCP + 0x9c, val);
  2291. val = REG_RD(bp, GRCBASE_MCP + 0x9c);
  2292. if (val & (1L << 31))
  2293. break;
  2294. msleep(5);
  2295. }
  2296. if (!(val & (1L << 31))) {
  2297. BNX2X_ERR("Cannot acquire MCP access lock register\n");
  2298. rc = -EBUSY;
  2299. }
  2300. return rc;
  2301. }
  2302. /* release split MCP access lock register */
  2303. static void bnx2x_release_alr(struct bnx2x *bp)
  2304. {
  2305. REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
  2306. }
  2307. static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
  2308. {
  2309. struct host_def_status_block *def_sb = bp->def_status_blk;
  2310. u16 rc = 0;
  2311. barrier(); /* status block is written to by the chip */
  2312. if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
  2313. bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
  2314. rc |= 1;
  2315. }
  2316. if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
  2317. bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
  2318. rc |= 2;
  2319. }
  2320. if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
  2321. bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
  2322. rc |= 4;
  2323. }
  2324. if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
  2325. bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
  2326. rc |= 8;
  2327. }
  2328. if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
  2329. bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
  2330. rc |= 16;
  2331. }
  2332. return rc;
  2333. }
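/*
 * Note (added for clarity): the value returned above is a bitmask of which
 * default status block indices changed - bit 0 for the attention index and
 * bits 1..4 for the CSTORM, USTORM, XSTORM and TSTORM indices respectively.
 * bnx2x_sp_task() below only inspects bit 0 (attentions) and bit 1
 * (CStorm STAT_QUERY events).
 */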
  2334. /*
  2335. * slow path service functions
  2336. */
  2337. static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
  2338. {
  2339. int port = BP_PORT(bp);
  2340. u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
  2341. COMMAND_REG_ATTN_BITS_SET);
  2342. u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  2343. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  2344. u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
  2345. NIG_REG_MASK_INTERRUPT_PORT0;
  2346. u32 aeu_mask;
  2347. u32 nig_mask = 0;
  2348. if (bp->attn_state & asserted)
  2349. BNX2X_ERR("IGU ERROR\n");
  2350. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2351. aeu_mask = REG_RD(bp, aeu_addr);
  2352. DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
  2353. aeu_mask, asserted);
  2354. aeu_mask &= ~(asserted & 0x3ff);
  2355. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  2356. REG_WR(bp, aeu_addr, aeu_mask);
  2357. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2358. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  2359. bp->attn_state |= asserted;
  2360. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  2361. if (asserted & ATTN_HARD_WIRED_MASK) {
  2362. if (asserted & ATTN_NIG_FOR_FUNC) {
  2363. bnx2x_acquire_phy_lock(bp);
  2364. /* save nig interrupt mask */
  2365. nig_mask = REG_RD(bp, nig_int_mask_addr);
  2366. REG_WR(bp, nig_int_mask_addr, 0);
  2367. bnx2x_link_attn(bp);
  2368. /* handle unicore attn? */
  2369. }
  2370. if (asserted & ATTN_SW_TIMER_4_FUNC)
  2371. DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
  2372. if (asserted & GPIO_2_FUNC)
  2373. DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
  2374. if (asserted & GPIO_3_FUNC)
  2375. DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
  2376. if (asserted & GPIO_4_FUNC)
  2377. DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
  2378. if (port == 0) {
  2379. if (asserted & ATTN_GENERAL_ATTN_1) {
  2380. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
  2381. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
  2382. }
  2383. if (asserted & ATTN_GENERAL_ATTN_2) {
  2384. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
  2385. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
  2386. }
  2387. if (asserted & ATTN_GENERAL_ATTN_3) {
  2388. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
  2389. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
  2390. }
  2391. } else {
  2392. if (asserted & ATTN_GENERAL_ATTN_4) {
  2393. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
  2394. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
  2395. }
  2396. if (asserted & ATTN_GENERAL_ATTN_5) {
  2397. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
  2398. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
  2399. }
  2400. if (asserted & ATTN_GENERAL_ATTN_6) {
  2401. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
  2402. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
  2403. }
  2404. }
  2405. } /* if hardwired */
  2406. DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
  2407. asserted, hc_addr);
  2408. REG_WR(bp, hc_addr, asserted);
  2409. /* now set back the mask */
  2410. if (asserted & ATTN_NIG_FOR_FUNC) {
  2411. REG_WR(bp, nig_int_mask_addr, nig_mask);
  2412. bnx2x_release_phy_lock(bp);
  2413. }
  2414. }
  2415. static inline void bnx2x_fan_failure(struct bnx2x *bp)
  2416. {
  2417. int port = BP_PORT(bp);
  2418. /* mark the failure */
  2419. bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
  2420. bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
  2421. SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
  2422. bp->link_params.ext_phy_config);
  2423. /* log the failure */
2424. netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2425. " the driver to shut down the card to prevent permanent"
2426. " damage. Please contact OEM Support for assistance\n");
  2427. }
  2428. static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
  2429. {
  2430. int port = BP_PORT(bp);
  2431. int reg_offset;
  2432. u32 val, swap_val, swap_override;
  2433. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  2434. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  2435. if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
  2436. val = REG_RD(bp, reg_offset);
  2437. val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
  2438. REG_WR(bp, reg_offset, val);
  2439. BNX2X_ERR("SPIO5 hw attention\n");
  2440. /* Fan failure attention */
  2441. switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
  2442. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
  2443. /* Low power mode is controlled by GPIO 2 */
  2444. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
  2445. MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
  2446. /* The PHY reset is controlled by GPIO 1 */
  2447. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
  2448. MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
  2449. break;
  2450. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
  2451. /* The PHY reset is controlled by GPIO 1 */
  2452. /* fake the port number to cancel the swap done in
  2453. set_gpio() */
  2454. swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
  2455. swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
  2456. port = (swap_val && swap_override) ^ 1;
  2457. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
  2458. MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
  2459. break;
  2460. default:
  2461. break;
  2462. }
  2463. bnx2x_fan_failure(bp);
  2464. }
  2465. if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
  2466. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
  2467. bnx2x_acquire_phy_lock(bp);
  2468. bnx2x_handle_module_detect_int(&bp->link_params);
  2469. bnx2x_release_phy_lock(bp);
  2470. }
  2471. if (attn & HW_INTERRUT_ASSERT_SET_0) {
  2472. val = REG_RD(bp, reg_offset);
  2473. val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
  2474. REG_WR(bp, reg_offset, val);
  2475. BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
  2476. (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
  2477. bnx2x_panic();
  2478. }
  2479. }
  2480. static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
  2481. {
  2482. u32 val;
  2483. if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
  2484. val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
  2485. BNX2X_ERR("DB hw attention 0x%x\n", val);
  2486. /* DORQ discard attention */
  2487. if (val & 0x2)
  2488. BNX2X_ERR("FATAL error from DORQ\n");
  2489. }
  2490. if (attn & HW_INTERRUT_ASSERT_SET_1) {
  2491. int port = BP_PORT(bp);
  2492. int reg_offset;
  2493. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
  2494. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
  2495. val = REG_RD(bp, reg_offset);
  2496. val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
  2497. REG_WR(bp, reg_offset, val);
  2498. BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
  2499. (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
  2500. bnx2x_panic();
  2501. }
  2502. }
  2503. static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
  2504. {
  2505. u32 val;
  2506. if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
  2507. val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
  2508. BNX2X_ERR("CFC hw attention 0x%x\n", val);
  2509. /* CFC error attention */
  2510. if (val & 0x2)
  2511. BNX2X_ERR("FATAL error from CFC\n");
  2512. }
  2513. if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
  2514. val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
  2515. BNX2X_ERR("PXP hw attention 0x%x\n", val);
  2516. /* RQ_USDMDP_FIFO_OVERFLOW */
  2517. if (val & 0x18000)
  2518. BNX2X_ERR("FATAL error from PXP\n");
  2519. }
  2520. if (attn & HW_INTERRUT_ASSERT_SET_2) {
  2521. int port = BP_PORT(bp);
  2522. int reg_offset;
  2523. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
  2524. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
  2525. val = REG_RD(bp, reg_offset);
  2526. val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
  2527. REG_WR(bp, reg_offset, val);
  2528. BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
  2529. (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
  2530. bnx2x_panic();
  2531. }
  2532. }
  2533. static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
  2534. {
  2535. u32 val;
  2536. if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
  2537. if (attn & BNX2X_PMF_LINK_ASSERT) {
  2538. int func = BP_FUNC(bp);
  2539. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  2540. bp->mf_config = SHMEM_RD(bp,
  2541. mf_cfg.func_mf_config[func].config);
  2542. val = SHMEM_RD(bp, func_mb[func].drv_status);
  2543. if (val & DRV_STATUS_DCC_EVENT_MASK)
  2544. bnx2x_dcc_event(bp,
  2545. (val & DRV_STATUS_DCC_EVENT_MASK));
  2546. bnx2x__link_status_update(bp);
  2547. if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
  2548. bnx2x_pmf_update(bp);
  2549. } else if (attn & BNX2X_MC_ASSERT_BITS) {
  2550. BNX2X_ERR("MC assert!\n");
  2551. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
  2552. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
  2553. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
  2554. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
  2555. bnx2x_panic();
  2556. } else if (attn & BNX2X_MCP_ASSERT) {
  2557. BNX2X_ERR("MCP assert!\n");
  2558. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
  2559. bnx2x_fw_dump(bp);
  2560. } else
  2561. BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
  2562. }
  2563. if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
  2564. BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
  2565. if (attn & BNX2X_GRC_TIMEOUT) {
  2566. val = CHIP_IS_E1H(bp) ?
  2567. REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
  2568. BNX2X_ERR("GRC time-out 0x%08x\n", val);
  2569. }
  2570. if (attn & BNX2X_GRC_RSV) {
  2571. val = CHIP_IS_E1H(bp) ?
  2572. REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
  2573. BNX2X_ERR("GRC reserved 0x%08x\n", val);
  2574. }
  2575. REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
  2576. }
  2577. }
  2578. static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
  2579. static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
  2580. #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
  2581. #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
  2582. #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
  2583. #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
  2584. #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
  2585. #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
  2586. /*
  2587. * should be run under rtnl lock
  2588. */
  2589. static inline void bnx2x_set_reset_done(struct bnx2x *bp)
  2590. {
  2591. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2592. val &= ~(1 << RESET_DONE_FLAG_SHIFT);
  2593. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2594. barrier();
  2595. mmiowb();
  2596. }
  2597. /*
  2598. * should be run under rtnl lock
  2599. */
  2600. static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
  2601. {
  2602. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2603. val |= (1 << RESET_DONE_FLAG_SHIFT);
  2604. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2605. barrier();
  2606. mmiowb();
  2607. }
  2608. /*
  2609. * should be run under rtnl lock
  2610. */
  2611. static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
  2612. {
  2613. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2614. DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
  2615. return (val & RESET_DONE_FLAG_MASK) ? false : true;
  2616. }
  2617. /*
  2618. * should be run under rtnl lock
  2619. */
  2620. static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
  2621. {
  2622. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2623. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2624. val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
  2625. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2626. barrier();
  2627. mmiowb();
  2628. }
  2629. /*
  2630. * should be run under rtnl lock
  2631. */
  2632. static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
  2633. {
  2634. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2635. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2636. val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
  2637. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2638. barrier();
  2639. mmiowb();
  2640. return val1;
  2641. }
  2642. /*
  2643. * should be run under rtnl lock
  2644. */
  2645. static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
  2646. {
  2647. return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
  2648. }
  2649. static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
  2650. {
  2651. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2652. REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
  2653. }
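/*
 * Layout sketch of BNX2X_MISC_GEN_REG as used by the helpers above
 * (derived from the macros; added here for clarity):
 *
 *   bits [15:0] - per-chip load counter (LOAD_COUNTER_MASK)
 *   bit  [16]   - "reset in progress" flag (RESET_DONE_FLAG_SHIFT)
 *
 * bnx2x_reset_is_done() therefore reports true only while the flag bits
 * above the counter (in practice bit 16) are clear, and the counter
 * helpers preserve those flag bits while incrementing or decrementing.
 */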
  2654. static inline void _print_next_block(int idx, const char *blk)
  2655. {
  2656. if (idx)
  2657. pr_cont(", ");
  2658. pr_cont("%s", blk);
  2659. }
  2660. static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
  2661. {
  2662. int i = 0;
  2663. u32 cur_bit = 0;
  2664. for (i = 0; sig; i++) {
  2665. cur_bit = ((u32)0x1 << i);
  2666. if (sig & cur_bit) {
  2667. switch (cur_bit) {
  2668. case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
  2669. _print_next_block(par_num++, "BRB");
  2670. break;
  2671. case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
  2672. _print_next_block(par_num++, "PARSER");
  2673. break;
  2674. case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
  2675. _print_next_block(par_num++, "TSDM");
  2676. break;
  2677. case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
  2678. _print_next_block(par_num++, "SEARCHER");
  2679. break;
  2680. case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
  2681. _print_next_block(par_num++, "TSEMI");
  2682. break;
  2683. }
  2684. /* Clear the bit */
  2685. sig &= ~cur_bit;
  2686. }
  2687. }
  2688. return par_num;
  2689. }
  2690. static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
  2691. {
  2692. int i = 0;
  2693. u32 cur_bit = 0;
  2694. for (i = 0; sig; i++) {
  2695. cur_bit = ((u32)0x1 << i);
  2696. if (sig & cur_bit) {
  2697. switch (cur_bit) {
  2698. case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
  2699. _print_next_block(par_num++, "PBCLIENT");
  2700. break;
  2701. case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
  2702. _print_next_block(par_num++, "QM");
  2703. break;
  2704. case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
  2705. _print_next_block(par_num++, "XSDM");
  2706. break;
  2707. case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
  2708. _print_next_block(par_num++, "XSEMI");
  2709. break;
  2710. case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
  2711. _print_next_block(par_num++, "DOORBELLQ");
  2712. break;
  2713. case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
  2714. _print_next_block(par_num++, "VAUX PCI CORE");
  2715. break;
  2716. case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
  2717. _print_next_block(par_num++, "DEBUG");
  2718. break;
  2719. case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
  2720. _print_next_block(par_num++, "USDM");
  2721. break;
  2722. case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
  2723. _print_next_block(par_num++, "USEMI");
  2724. break;
  2725. case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
  2726. _print_next_block(par_num++, "UPB");
  2727. break;
  2728. case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
  2729. _print_next_block(par_num++, "CSDM");
  2730. break;
  2731. }
  2732. /* Clear the bit */
  2733. sig &= ~cur_bit;
  2734. }
  2735. }
  2736. return par_num;
  2737. }
  2738. static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
  2739. {
  2740. int i = 0;
  2741. u32 cur_bit = 0;
  2742. for (i = 0; sig; i++) {
  2743. cur_bit = ((u32)0x1 << i);
  2744. if (sig & cur_bit) {
  2745. switch (cur_bit) {
  2746. case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
  2747. _print_next_block(par_num++, "CSEMI");
  2748. break;
  2749. case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
  2750. _print_next_block(par_num++, "PXP");
  2751. break;
  2752. case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
  2753. _print_next_block(par_num++,
  2754. "PXPPCICLOCKCLIENT");
  2755. break;
  2756. case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
  2757. _print_next_block(par_num++, "CFC");
  2758. break;
  2759. case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
  2760. _print_next_block(par_num++, "CDU");
  2761. break;
  2762. case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
  2763. _print_next_block(par_num++, "IGU");
  2764. break;
  2765. case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
  2766. _print_next_block(par_num++, "MISC");
  2767. break;
  2768. }
  2769. /* Clear the bit */
  2770. sig &= ~cur_bit;
  2771. }
  2772. }
  2773. return par_num;
  2774. }
  2775. static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
  2776. {
  2777. int i = 0;
  2778. u32 cur_bit = 0;
  2779. for (i = 0; sig; i++) {
  2780. cur_bit = ((u32)0x1 << i);
  2781. if (sig & cur_bit) {
  2782. switch (cur_bit) {
  2783. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
  2784. _print_next_block(par_num++, "MCP ROM");
  2785. break;
  2786. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
  2787. _print_next_block(par_num++, "MCP UMP RX");
  2788. break;
  2789. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
  2790. _print_next_block(par_num++, "MCP UMP TX");
  2791. break;
  2792. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
  2793. _print_next_block(par_num++, "MCP SCPAD");
  2794. break;
  2795. }
  2796. /* Clear the bit */
  2797. sig &= ~cur_bit;
  2798. }
  2799. }
  2800. return par_num;
  2801. }
  2802. static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
  2803. u32 sig2, u32 sig3)
  2804. {
  2805. if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
  2806. (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
  2807. int par_num = 0;
  2808. DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
  2809. "[0]:0x%08x [1]:0x%08x "
  2810. "[2]:0x%08x [3]:0x%08x\n",
  2811. sig0 & HW_PRTY_ASSERT_SET_0,
  2812. sig1 & HW_PRTY_ASSERT_SET_1,
  2813. sig2 & HW_PRTY_ASSERT_SET_2,
  2814. sig3 & HW_PRTY_ASSERT_SET_3);
2815. printk(KERN_ERR "%s: Parity errors detected in blocks: ",
  2816. bp->dev->name);
  2817. par_num = bnx2x_print_blocks_with_parity0(
  2818. sig0 & HW_PRTY_ASSERT_SET_0, par_num);
  2819. par_num = bnx2x_print_blocks_with_parity1(
  2820. sig1 & HW_PRTY_ASSERT_SET_1, par_num);
  2821. par_num = bnx2x_print_blocks_with_parity2(
  2822. sig2 & HW_PRTY_ASSERT_SET_2, par_num);
  2823. par_num = bnx2x_print_blocks_with_parity3(
  2824. sig3 & HW_PRTY_ASSERT_SET_3, par_num);
  2825. printk("\n");
  2826. return true;
  2827. } else
  2828. return false;
  2829. }
  2830. static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
  2831. {
  2832. struct attn_route attn;
  2833. int port = BP_PORT(bp);
  2834. attn.sig[0] = REG_RD(bp,
  2835. MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
  2836. port*4);
  2837. attn.sig[1] = REG_RD(bp,
  2838. MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
  2839. port*4);
  2840. attn.sig[2] = REG_RD(bp,
  2841. MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
  2842. port*4);
  2843. attn.sig[3] = REG_RD(bp,
  2844. MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
  2845. port*4);
  2846. return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
  2847. attn.sig[3]);
  2848. }
  2849. static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
  2850. {
  2851. struct attn_route attn, *group_mask;
  2852. int port = BP_PORT(bp);
  2853. int index;
  2854. u32 reg_addr;
  2855. u32 val;
  2856. u32 aeu_mask;
2857. /* need to take the HW lock because the MCP or the other port
2858. might also try to handle this event */
  2859. bnx2x_acquire_alr(bp);
  2860. if (bnx2x_chk_parity_attn(bp)) {
  2861. bp->recovery_state = BNX2X_RECOVERY_INIT;
  2862. bnx2x_set_reset_in_progress(bp);
  2863. schedule_delayed_work(&bp->reset_task, 0);
  2864. /* Disable HW interrupts */
  2865. bnx2x_int_disable(bp);
  2866. bnx2x_release_alr(bp);
2867. /* In case of parity errors don't handle attentions so that
2868. * the other function can also "see" the parity errors.
2869. */
  2870. return;
  2871. }
  2872. attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
  2873. attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
  2874. attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
  2875. attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
  2876. DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
  2877. attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
  2878. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  2879. if (deasserted & (1 << index)) {
  2880. group_mask = &bp->attn_group[index];
  2881. DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
  2882. index, group_mask->sig[0], group_mask->sig[1],
  2883. group_mask->sig[2], group_mask->sig[3]);
  2884. bnx2x_attn_int_deasserted3(bp,
  2885. attn.sig[3] & group_mask->sig[3]);
  2886. bnx2x_attn_int_deasserted1(bp,
  2887. attn.sig[1] & group_mask->sig[1]);
  2888. bnx2x_attn_int_deasserted2(bp,
  2889. attn.sig[2] & group_mask->sig[2]);
  2890. bnx2x_attn_int_deasserted0(bp,
  2891. attn.sig[0] & group_mask->sig[0]);
  2892. }
  2893. }
  2894. bnx2x_release_alr(bp);
  2895. reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
  2896. val = ~deasserted;
  2897. DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
  2898. val, reg_addr);
  2899. REG_WR(bp, reg_addr, val);
  2900. if (~bp->attn_state & deasserted)
  2901. BNX2X_ERR("IGU ERROR\n");
  2902. reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  2903. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  2904. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2905. aeu_mask = REG_RD(bp, reg_addr);
  2906. DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
  2907. aeu_mask, deasserted);
  2908. aeu_mask |= (deasserted & 0x3ff);
  2909. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  2910. REG_WR(bp, reg_addr, aeu_mask);
  2911. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2912. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  2913. bp->attn_state &= ~deasserted;
  2914. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  2915. }
  2916. static void bnx2x_attn_int(struct bnx2x *bp)
  2917. {
  2918. /* read local copy of bits */
  2919. u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
  2920. attn_bits);
  2921. u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
  2922. attn_bits_ack);
  2923. u32 attn_state = bp->attn_state;
  2924. /* look for changed bits */
  2925. u32 asserted = attn_bits & ~attn_ack & ~attn_state;
  2926. u32 deasserted = ~attn_bits & attn_ack & attn_state;
  2927. DP(NETIF_MSG_HW,
  2928. "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
  2929. attn_bits, attn_ack, asserted, deasserted);
  2930. if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
  2931. BNX2X_ERR("BAD attention state\n");
  2932. /* handle bits that were raised */
  2933. if (asserted)
  2934. bnx2x_attn_int_asserted(bp, asserted);
  2935. if (deasserted)
  2936. bnx2x_attn_int_deasserted(bp, deasserted);
  2937. }
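/* Slow path work item: runs from bnx2x_wq after the slow path interrupt.
 * Consumes the default status block (HW attentions, CStorm STAT_QUERY events)
 * and then acks all storm indices; only the final ack re-enables the IGU
 * interrupt line.
 */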
  2938. static void bnx2x_sp_task(struct work_struct *work)
  2939. {
  2940. struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
  2941. u16 status;
  2942. /* Return here if interrupt is disabled */
  2943. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  2944. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  2945. return;
  2946. }
  2947. status = bnx2x_update_dsb_idx(bp);
  2948. /* if (status == 0) */
  2949. /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
  2950. DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
  2951. /* HW attentions */
  2952. if (status & 0x1) {
  2953. bnx2x_attn_int(bp);
  2954. status &= ~0x1;
  2955. }
  2956. /* CStorm events: STAT_QUERY */
  2957. if (status & 0x2) {
  2958. DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
  2959. status &= ~0x2;
  2960. }
  2961. if (unlikely(status))
  2962. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  2963. status);
  2964. bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
  2965. IGU_INT_NOP, 1);
  2966. bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
  2967. IGU_INT_NOP, 1);
  2968. bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
  2969. IGU_INT_NOP, 1);
  2970. bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
  2971. IGU_INT_NOP, 1);
  2972. bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
  2973. IGU_INT_ENABLE, 1);
  2974. }
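/* MSI-X slow path ISR: mask further default status block interrupts, give the
 * CNIC module (if loaded) a chance to handle its events, then defer the real
 * work to bnx2x_sp_task().
 */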
  2975. static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
  2976. {
  2977. struct net_device *dev = dev_instance;
  2978. struct bnx2x *bp = netdev_priv(dev);
  2979. /* Return here if interrupt is disabled */
  2980. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  2981. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  2982. return IRQ_HANDLED;
  2983. }
  2984. bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
  2985. #ifdef BNX2X_STOP_ON_ERROR
  2986. if (unlikely(bp->panic))
  2987. return IRQ_HANDLED;
  2988. #endif
  2989. #ifdef BCM_CNIC
  2990. {
  2991. struct cnic_ops *c_ops;
  2992. rcu_read_lock();
  2993. c_ops = rcu_dereference(bp->cnic_ops);
  2994. if (c_ops)
  2995. c_ops->cnic_handler(bp->cnic_data, NULL);
  2996. rcu_read_unlock();
  2997. }
  2998. #endif
  2999. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  3000. return IRQ_HANDLED;
  3001. }
  3002. /* end of slow path */
  3003. /* Statistics */
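/* Hardware and firmware counters are kept as {hi, lo} pairs of u32 rather
 * than native u64.  The macros below do 64-bit add/subtract with an explicit
 * carry/borrow and fold deltas of the 32-bit per-client storm counters into
 * those pairs.
 */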
/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
  3114. /*
  3115. * Init service functions
  3116. */
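/* Ask the storms for a fresh statistics snapshot: post a STAT_QUERY ramrod
 * carrying the incremented drv_counter (so the reply can be matched later in
 * bnx2x_storm_stats_update()) and a bit vector of all client IDs.  The stats
 * ramrod has a reserved SPQ slot, hence spq_left is given back on success.
 */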
  3117. static void bnx2x_storm_stats_post(struct bnx2x *bp)
  3118. {
  3119. if (!bp->stats_pending) {
  3120. struct eth_query_ramrod_data ramrod_data = {0};
  3121. int i, rc;
  3122. spin_lock_bh(&bp->stats_lock);
  3123. ramrod_data.drv_counter = bp->stats_counter++;
  3124. ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
  3125. for_each_queue(bp, i)
  3126. ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
  3127. rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
  3128. ((u32 *)&ramrod_data)[1],
  3129. ((u32 *)&ramrod_data)[0], 0);
  3130. if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
  3132. bp->spq_left++;
  3133. bp->stats_pending = 1;
  3134. }
  3135. spin_unlock_bh(&bp->stats_lock);
  3136. }
  3137. }
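/* Kick the DMAE engine for the statistics transfers.  If a command chain was
 * prepared in the slowpath buffer (executer_idx != 0), a small "loader"
 * command feeds it into the DMAE command memory and starts it via the GO
 * register; otherwise the single pre-built stats_dmae command is posted
 * directly.  Completion of the last transfer is signalled through stats_comp.
 */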
  3138. static void bnx2x_hw_stats_post(struct bnx2x *bp)
  3139. {
  3140. struct dmae_command *dmae = &bp->stats_dmae;
  3141. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3142. *stats_comp = DMAE_COMP_VAL;
  3143. if (CHIP_REV_IS_SLOW(bp))
  3144. return;
  3145. /* loader */
  3146. if (bp->executer_idx) {
  3147. int loader_idx = PMF_DMAE_C(bp);
  3148. memset(dmae, 0, sizeof(struct dmae_command));
  3149. dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  3150. DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
  3151. DMAE_CMD_DST_RESET |
  3152. #ifdef __BIG_ENDIAN
  3153. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3154. #else
  3155. DMAE_CMD_ENDIANITY_DW_SWAP |
  3156. #endif
  3157. (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
  3158. DMAE_CMD_PORT_0) |
  3159. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  3160. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
  3161. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
  3162. dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
  3163. sizeof(struct dmae_command) *
  3164. (loader_idx + 1)) >> 2;
  3165. dmae->dst_addr_hi = 0;
  3166. dmae->len = sizeof(struct dmae_command) >> 2;
  3167. if (CHIP_IS_E1(bp))
  3168. dmae->len--;
  3169. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
  3170. dmae->comp_addr_hi = 0;
  3171. dmae->comp_val = 1;
  3172. *stats_comp = 0;
  3173. bnx2x_post_dmae(bp, dmae, loader_idx);
  3174. } else if (bp->func_stx) {
  3175. *stats_comp = 0;
  3176. bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
  3177. }
  3178. }
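/* Wait for the statistics DMAE chain to complete: poll the stats_comp word,
 * sleeping 1 ms per iteration, and give up after ~10 ms with an error message.
 */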
  3179. static int bnx2x_stats_comp(struct bnx2x *bp)
  3180. {
  3181. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3182. int cnt = 10;
  3183. might_sleep();
  3184. while (*stats_comp != DMAE_COMP_VAL) {
  3185. if (!cnt) {
  3186. BNX2X_ERR("timeout waiting for stats finished\n");
  3187. break;
  3188. }
  3189. cnt--;
  3190. msleep(1);
  3191. }
  3192. return 1;
  3193. }
  3194. /*
  3195. * Statistics service functions
  3196. */
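/* On becoming the PMF, pull the port statistics accumulated so far from the
 * shmem port_stx area into the local buffer.  The read is split into two DMAE
 * commands because of the DMAE read length limit.
 */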
  3197. static void bnx2x_stats_pmf_update(struct bnx2x *bp)
  3198. {
  3199. struct dmae_command *dmae;
  3200. u32 opcode;
  3201. int loader_idx = PMF_DMAE_C(bp);
  3202. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3203. /* sanity */
  3204. if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
  3205. BNX2X_ERR("BUG!\n");
  3206. return;
  3207. }
  3208. bp->executer_idx = 0;
  3209. opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
  3210. DMAE_CMD_C_ENABLE |
  3211. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3212. #ifdef __BIG_ENDIAN
  3213. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3214. #else
  3215. DMAE_CMD_ENDIANITY_DW_SWAP |
  3216. #endif
  3217. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3218. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  3219. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3220. dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
  3221. dmae->src_addr_lo = bp->port.port_stx >> 2;
  3222. dmae->src_addr_hi = 0;
  3223. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
  3224. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
  3225. dmae->len = DMAE_LEN32_RD_MAX;
  3226. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3227. dmae->comp_addr_hi = 0;
  3228. dmae->comp_val = 1;
  3229. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3230. dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
  3231. dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
  3232. dmae->src_addr_hi = 0;
  3233. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
  3234. DMAE_LEN32_RD_MAX * 4);
  3235. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
  3236. DMAE_LEN32_RD_MAX * 4);
  3237. dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
  3238. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  3239. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  3240. dmae->comp_val = DMAE_COMP_VAL;
  3241. *stats_comp = 0;
  3242. bnx2x_hw_stats_post(bp);
  3243. bnx2x_stats_comp(bp);
  3244. }
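/* Build (but do not yet run) the PMF statistics DMAE program: write the host
 * port and function stats out to the shmem areas for the MCP, then read the
 * BMAC or EMAC counters and the NIG discard/truncate and egress packet
 * counters back into the slowpath buffers.  The last command completes into
 * stats_comp; the chain is executed by bnx2x_hw_stats_post().
 */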
  3245. static void bnx2x_port_stats_init(struct bnx2x *bp)
  3246. {
  3247. struct dmae_command *dmae;
  3248. int port = BP_PORT(bp);
  3249. int vn = BP_E1HVN(bp);
  3250. u32 opcode;
  3251. int loader_idx = PMF_DMAE_C(bp);
  3252. u32 mac_addr;
  3253. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3254. /* sanity */
  3255. if (!bp->link_vars.link_up || !bp->port.pmf) {
  3256. BNX2X_ERR("BUG!\n");
  3257. return;
  3258. }
  3259. bp->executer_idx = 0;
  3260. /* MCP */
  3261. opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  3262. DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
  3263. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3264. #ifdef __BIG_ENDIAN
  3265. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3266. #else
  3267. DMAE_CMD_ENDIANITY_DW_SWAP |
  3268. #endif
  3269. (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3270. (vn << DMAE_CMD_E1HVN_SHIFT));
  3271. if (bp->port.port_stx) {
  3272. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3273. dmae->opcode = opcode;
  3274. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
  3275. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
  3276. dmae->dst_addr_lo = bp->port.port_stx >> 2;
  3277. dmae->dst_addr_hi = 0;
  3278. dmae->len = sizeof(struct host_port_stats) >> 2;
  3279. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3280. dmae->comp_addr_hi = 0;
  3281. dmae->comp_val = 1;
  3282. }
  3283. if (bp->func_stx) {
  3284. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3285. dmae->opcode = opcode;
  3286. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
  3287. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
  3288. dmae->dst_addr_lo = bp->func_stx >> 2;
  3289. dmae->dst_addr_hi = 0;
  3290. dmae->len = sizeof(struct host_func_stats) >> 2;
  3291. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3292. dmae->comp_addr_hi = 0;
  3293. dmae->comp_val = 1;
  3294. }
  3295. /* MAC */
  3296. opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
  3297. DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
  3298. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3299. #ifdef __BIG_ENDIAN
  3300. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3301. #else
  3302. DMAE_CMD_ENDIANITY_DW_SWAP |
  3303. #endif
  3304. (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3305. (vn << DMAE_CMD_E1HVN_SHIFT));
  3306. if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
  3307. mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
  3308. NIG_REG_INGRESS_BMAC0_MEM);
  3309. /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
  3310. BIGMAC_REGISTER_TX_STAT_GTBYT */
  3311. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3312. dmae->opcode = opcode;
  3313. dmae->src_addr_lo = (mac_addr +
  3314. BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
  3315. dmae->src_addr_hi = 0;
  3316. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
  3317. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
  3318. dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
  3319. BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
  3320. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3321. dmae->comp_addr_hi = 0;
  3322. dmae->comp_val = 1;
  3323. /* BIGMAC_REGISTER_RX_STAT_GR64 ..
  3324. BIGMAC_REGISTER_RX_STAT_GRIPJ */
  3325. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3326. dmae->opcode = opcode;
  3327. dmae->src_addr_lo = (mac_addr +
  3328. BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
  3329. dmae->src_addr_hi = 0;
  3330. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
  3331. offsetof(struct bmac_stats, rx_stat_gr64_lo));
  3332. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
  3333. offsetof(struct bmac_stats, rx_stat_gr64_lo));
  3334. dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
  3335. BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
  3336. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3337. dmae->comp_addr_hi = 0;
  3338. dmae->comp_val = 1;
  3339. } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
  3340. mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
  3341. /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
  3342. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3343. dmae->opcode = opcode;
  3344. dmae->src_addr_lo = (mac_addr +
  3345. EMAC_REG_EMAC_RX_STAT_AC) >> 2;
  3346. dmae->src_addr_hi = 0;
  3347. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
  3348. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
  3349. dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
  3350. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3351. dmae->comp_addr_hi = 0;
  3352. dmae->comp_val = 1;
  3353. /* EMAC_REG_EMAC_RX_STAT_AC_28 */
  3354. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3355. dmae->opcode = opcode;
  3356. dmae->src_addr_lo = (mac_addr +
  3357. EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
  3358. dmae->src_addr_hi = 0;
  3359. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
  3360. offsetof(struct emac_stats, rx_stat_falsecarriererrors));
  3361. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
  3362. offsetof(struct emac_stats, rx_stat_falsecarriererrors));
  3363. dmae->len = 1;
  3364. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3365. dmae->comp_addr_hi = 0;
  3366. dmae->comp_val = 1;
  3367. /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
  3368. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3369. dmae->opcode = opcode;
  3370. dmae->src_addr_lo = (mac_addr +
  3371. EMAC_REG_EMAC_TX_STAT_AC) >> 2;
  3372. dmae->src_addr_hi = 0;
  3373. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
  3374. offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
  3375. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
  3376. offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
  3377. dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
  3378. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3379. dmae->comp_addr_hi = 0;
  3380. dmae->comp_val = 1;
  3381. }
  3382. /* NIG */
  3383. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3384. dmae->opcode = opcode;
  3385. dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
  3386. NIG_REG_STAT0_BRB_DISCARD) >> 2;
  3387. dmae->src_addr_hi = 0;
  3388. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
  3389. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
  3390. dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
  3391. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3392. dmae->comp_addr_hi = 0;
  3393. dmae->comp_val = 1;
  3394. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3395. dmae->opcode = opcode;
  3396. dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
  3397. NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
  3398. dmae->src_addr_hi = 0;
  3399. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
  3400. offsetof(struct nig_stats, egress_mac_pkt0_lo));
  3401. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
  3402. offsetof(struct nig_stats, egress_mac_pkt0_lo));
  3403. dmae->len = (2*sizeof(u32)) >> 2;
  3404. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3405. dmae->comp_addr_hi = 0;
  3406. dmae->comp_val = 1;
  3407. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3408. dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
  3409. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  3410. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3411. #ifdef __BIG_ENDIAN
  3412. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3413. #else
  3414. DMAE_CMD_ENDIANITY_DW_SWAP |
  3415. #endif
  3416. (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3417. (vn << DMAE_CMD_E1HVN_SHIFT));
  3418. dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
  3419. NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
  3420. dmae->src_addr_hi = 0;
  3421. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
  3422. offsetof(struct nig_stats, egress_mac_pkt1_lo));
  3423. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
  3424. offsetof(struct nig_stats, egress_mac_pkt1_lo));
  3425. dmae->len = (2*sizeof(u32)) >> 2;
  3426. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  3427. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  3428. dmae->comp_val = DMAE_COMP_VAL;
  3429. *stats_comp = 0;
  3430. }
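/* Prepare the single DMAE command that copies the host function statistics to
 * this function's shmem area; used by non-PMF functions when statistics start
 * and by bnx2x_func_stats_base_init().
 */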
  3431. static void bnx2x_func_stats_init(struct bnx2x *bp)
  3432. {
  3433. struct dmae_command *dmae = &bp->stats_dmae;
  3434. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3435. /* sanity */
  3436. if (!bp->func_stx) {
  3437. BNX2X_ERR("BUG!\n");
  3438. return;
  3439. }
  3440. bp->executer_idx = 0;
  3441. memset(dmae, 0, sizeof(struct dmae_command));
  3442. dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  3443. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  3444. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3445. #ifdef __BIG_ENDIAN
  3446. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3447. #else
  3448. DMAE_CMD_ENDIANITY_DW_SWAP |
  3449. #endif
  3450. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3451. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  3452. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
  3453. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
  3454. dmae->dst_addr_lo = bp->func_stx >> 2;
  3455. dmae->dst_addr_hi = 0;
  3456. dmae->len = sizeof(struct host_func_stats) >> 2;
  3457. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  3458. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  3459. dmae->comp_val = DMAE_COMP_VAL;
  3460. *stats_comp = 0;
  3461. }
  3462. static void bnx2x_stats_start(struct bnx2x *bp)
  3463. {
  3464. if (bp->port.pmf)
  3465. bnx2x_port_stats_init(bp);
  3466. else if (bp->func_stx)
  3467. bnx2x_func_stats_init(bp);
  3468. bnx2x_hw_stats_post(bp);
  3469. bnx2x_storm_stats_post(bp);
  3470. }
  3471. static void bnx2x_stats_pmf_start(struct bnx2x *bp)
  3472. {
  3473. bnx2x_stats_comp(bp);
  3474. bnx2x_stats_pmf_update(bp);
  3475. bnx2x_stats_start(bp);
  3476. }
  3477. static void bnx2x_stats_restart(struct bnx2x *bp)
  3478. {
  3479. bnx2x_stats_comp(bp);
  3480. bnx2x_stats_start(bp);
  3481. }
  3482. static void bnx2x_bmac_stats_update(struct bnx2x *bp)
  3483. {
  3484. struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
  3485. struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
  3486. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3487. struct {
  3488. u32 lo;
  3489. u32 hi;
  3490. } diff;
  3491. UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
  3492. UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
  3493. UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
  3494. UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
  3495. UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
  3496. UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
  3497. UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
  3498. UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
  3499. UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
  3500. UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
  3501. UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
  3502. UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
  3503. UPDATE_STAT64(tx_stat_gt127,
  3504. tx_stat_etherstatspkts65octetsto127octets);
  3505. UPDATE_STAT64(tx_stat_gt255,
  3506. tx_stat_etherstatspkts128octetsto255octets);
  3507. UPDATE_STAT64(tx_stat_gt511,
  3508. tx_stat_etherstatspkts256octetsto511octets);
  3509. UPDATE_STAT64(tx_stat_gt1023,
  3510. tx_stat_etherstatspkts512octetsto1023octets);
  3511. UPDATE_STAT64(tx_stat_gt1518,
  3512. tx_stat_etherstatspkts1024octetsto1522octets);
  3513. UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
  3514. UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
  3515. UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
  3516. UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
  3517. UPDATE_STAT64(tx_stat_gterr,
  3518. tx_stat_dot3statsinternalmactransmiterrors);
  3519. UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
  3520. estats->pause_frames_received_hi =
  3521. pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
  3522. estats->pause_frames_received_lo =
  3523. pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
  3524. estats->pause_frames_sent_hi =
  3525. pstats->mac_stx[1].tx_stat_outxoffsent_hi;
  3526. estats->pause_frames_sent_lo =
  3527. pstats->mac_stx[1].tx_stat_outxoffsent_lo;
  3528. }
  3529. static void bnx2x_emac_stats_update(struct bnx2x *bp)
  3530. {
  3531. struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
  3532. struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
  3533. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3534. UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
  3535. UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
  3536. UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
  3537. UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
  3538. UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
  3539. UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
  3540. UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
  3541. UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
  3542. UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
  3543. UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
  3544. UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
  3545. UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
  3546. UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
  3547. UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
  3548. UPDATE_EXTEND_STAT(tx_stat_outxonsent);
  3549. UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
  3550. UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
  3551. UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
  3552. UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
  3553. UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
  3554. UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
  3555. UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
  3556. UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
  3557. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
  3558. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
  3559. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
  3560. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
  3561. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
  3562. UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
  3563. UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
  3564. UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
  3565. estats->pause_frames_received_hi =
  3566. pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
  3567. estats->pause_frames_received_lo =
  3568. pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
  3569. ADD_64(estats->pause_frames_received_hi,
  3570. pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
  3571. estats->pause_frames_received_lo,
  3572. pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
  3573. estats->pause_frames_sent_hi =
  3574. pstats->mac_stx[1].tx_stat_outxonsent_hi;
  3575. estats->pause_frames_sent_lo =
  3576. pstats->mac_stx[1].tx_stat_outxonsent_lo;
  3577. ADD_64(estats->pause_frames_sent_hi,
  3578. pstats->mac_stx[1].tx_stat_outxoffsent_hi,
  3579. estats->pause_frames_sent_lo,
  3580. pstats->mac_stx[1].tx_stat_outxoffsent_lo);
  3581. }
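/* Fold the freshly DMAE'd MAC and NIG counters into the port statistics:
 * update mac_stx via the BMAC/EMAC helpers, extend the NIG discard/truncate
 * and egress packet counters, snapshot the NIG block as the new baseline and
 * mirror the MAC statistics into eth_stats.  Returns -1 if the DMA completed
 * with no active MAC, which should not happen.
 */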
  3582. static int bnx2x_hw_stats_update(struct bnx2x *bp)
  3583. {
  3584. struct nig_stats *new = bnx2x_sp(bp, nig_stats);
  3585. struct nig_stats *old = &(bp->port.old_nig_stats);
  3586. struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
  3587. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3588. struct {
  3589. u32 lo;
  3590. u32 hi;
  3591. } diff;
  3592. if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
  3593. bnx2x_bmac_stats_update(bp);
  3594. else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
  3595. bnx2x_emac_stats_update(bp);
  3596. else { /* unreached */
  3597. BNX2X_ERR("stats updated by DMAE but no MAC active\n");
  3598. return -1;
  3599. }
  3600. ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
  3601. new->brb_discard - old->brb_discard);
  3602. ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
  3603. new->brb_truncate - old->brb_truncate);
  3604. UPDATE_STAT64_NIG(egress_mac_pkt0,
  3605. etherstatspkts1024octetsto1522octets);
  3606. UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
  3607. memcpy(old, new, sizeof(struct nig_stats));
  3608. memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
  3609. sizeof(struct mac_stx));
  3610. estats->brb_drop_hi = pstats->brb_drop_hi;
  3611. estats->brb_drop_lo = pstats->brb_drop_lo;
  3612. pstats->host_port_stats_start = ++pstats->host_port_stats_end;
  3613. if (!BP_NOMCP(bp)) {
  3614. u32 nig_timer_max =
  3615. SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
  3616. if (nig_timer_max != estats->nig_timer_max) {
  3617. estats->nig_timer_max = nig_timer_max;
  3618. BNX2X_ERR("NIG timer max (%u)\n",
  3619. estats->nig_timer_max);
  3620. }
  3621. }
  3622. return 0;
  3623. }
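/* Merge the per-client statistics reported by the T/U/X storms into the
 * per-queue, function and port counters.  Each queue is only consumed if all
 * three storms already carry the counter of the last STAT_QUERY ramrod;
 * otherwise a negative value is returned and the caller retries later.
 */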
  3624. static int bnx2x_storm_stats_update(struct bnx2x *bp)
  3625. {
  3626. struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
  3627. struct tstorm_per_port_stats *tport =
  3628. &stats->tstorm_common.port_statistics;
  3629. struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
  3630. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3631. int i;
  3632. u16 cur_stats_counter;
  3633. /* Make sure we use the value of the counter
  3634. * used for sending the last stats ramrod.
  3635. */
  3636. spin_lock_bh(&bp->stats_lock);
  3637. cur_stats_counter = bp->stats_counter - 1;
  3638. spin_unlock_bh(&bp->stats_lock);
  3639. memcpy(&(fstats->total_bytes_received_hi),
  3640. &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
  3641. sizeof(struct host_func_stats) - 2*sizeof(u32));
  3642. estats->error_bytes_received_hi = 0;
  3643. estats->error_bytes_received_lo = 0;
  3644. estats->etherstatsoverrsizepkts_hi = 0;
  3645. estats->etherstatsoverrsizepkts_lo = 0;
  3646. estats->no_buff_discard_hi = 0;
  3647. estats->no_buff_discard_lo = 0;
  3648. for_each_queue(bp, i) {
  3649. struct bnx2x_fastpath *fp = &bp->fp[i];
  3650. int cl_id = fp->cl_id;
  3651. struct tstorm_per_client_stats *tclient =
  3652. &stats->tstorm_common.client_statistics[cl_id];
  3653. struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
  3654. struct ustorm_per_client_stats *uclient =
  3655. &stats->ustorm_common.client_statistics[cl_id];
  3656. struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
  3657. struct xstorm_per_client_stats *xclient =
  3658. &stats->xstorm_common.client_statistics[cl_id];
  3659. struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
  3660. struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
  3661. u32 diff;
  3662. /* are storm stats valid? */
  3663. if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
  3664. DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
  3665. " xstorm counter (0x%x) != stats_counter (0x%x)\n",
  3666. i, xclient->stats_counter, cur_stats_counter + 1);
  3667. return -1;
  3668. }
  3669. if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
  3670. DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
  3671. " tstorm counter (0x%x) != stats_counter (0x%x)\n",
  3672. i, tclient->stats_counter, cur_stats_counter + 1);
  3673. return -2;
  3674. }
  3675. if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
  3676. DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
  3677. " ustorm counter (0x%x) != stats_counter (0x%x)\n",
  3678. i, uclient->stats_counter, cur_stats_counter + 1);
  3679. return -4;
  3680. }
  3681. qstats->total_bytes_received_hi =
  3682. le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
  3683. qstats->total_bytes_received_lo =
  3684. le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
  3685. ADD_64(qstats->total_bytes_received_hi,
  3686. le32_to_cpu(tclient->rcv_multicast_bytes.hi),
  3687. qstats->total_bytes_received_lo,
  3688. le32_to_cpu(tclient->rcv_multicast_bytes.lo));
  3689. ADD_64(qstats->total_bytes_received_hi,
  3690. le32_to_cpu(tclient->rcv_unicast_bytes.hi),
  3691. qstats->total_bytes_received_lo,
  3692. le32_to_cpu(tclient->rcv_unicast_bytes.lo));
  3693. SUB_64(qstats->total_bytes_received_hi,
  3694. le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
  3695. qstats->total_bytes_received_lo,
  3696. le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
  3697. SUB_64(qstats->total_bytes_received_hi,
  3698. le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
  3699. qstats->total_bytes_received_lo,
  3700. le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
  3701. SUB_64(qstats->total_bytes_received_hi,
  3702. le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
  3703. qstats->total_bytes_received_lo,
  3704. le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
  3705. qstats->valid_bytes_received_hi =
  3706. qstats->total_bytes_received_hi;
  3707. qstats->valid_bytes_received_lo =
  3708. qstats->total_bytes_received_lo;
  3709. qstats->error_bytes_received_hi =
  3710. le32_to_cpu(tclient->rcv_error_bytes.hi);
  3711. qstats->error_bytes_received_lo =
  3712. le32_to_cpu(tclient->rcv_error_bytes.lo);
  3713. ADD_64(qstats->total_bytes_received_hi,
  3714. qstats->error_bytes_received_hi,
  3715. qstats->total_bytes_received_lo,
  3716. qstats->error_bytes_received_lo);
  3717. UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
  3718. total_unicast_packets_received);
  3719. UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
  3720. total_multicast_packets_received);
  3721. UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
  3722. total_broadcast_packets_received);
  3723. UPDATE_EXTEND_TSTAT(packets_too_big_discard,
  3724. etherstatsoverrsizepkts);
  3725. UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
  3726. SUB_EXTEND_USTAT(ucast_no_buff_pkts,
  3727. total_unicast_packets_received);
  3728. SUB_EXTEND_USTAT(mcast_no_buff_pkts,
  3729. total_multicast_packets_received);
  3730. SUB_EXTEND_USTAT(bcast_no_buff_pkts,
  3731. total_broadcast_packets_received);
  3732. UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
  3733. UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
  3734. UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
  3735. qstats->total_bytes_transmitted_hi =
  3736. le32_to_cpu(xclient->unicast_bytes_sent.hi);
  3737. qstats->total_bytes_transmitted_lo =
  3738. le32_to_cpu(xclient->unicast_bytes_sent.lo);
  3739. ADD_64(qstats->total_bytes_transmitted_hi,
  3740. le32_to_cpu(xclient->multicast_bytes_sent.hi),
  3741. qstats->total_bytes_transmitted_lo,
  3742. le32_to_cpu(xclient->multicast_bytes_sent.lo));
  3743. ADD_64(qstats->total_bytes_transmitted_hi,
  3744. le32_to_cpu(xclient->broadcast_bytes_sent.hi),
  3745. qstats->total_bytes_transmitted_lo,
  3746. le32_to_cpu(xclient->broadcast_bytes_sent.lo));
  3747. UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
  3748. total_unicast_packets_transmitted);
  3749. UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
  3750. total_multicast_packets_transmitted);
  3751. UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
  3752. total_broadcast_packets_transmitted);
  3753. old_tclient->checksum_discard = tclient->checksum_discard;
  3754. old_tclient->ttl0_discard = tclient->ttl0_discard;
  3755. ADD_64(fstats->total_bytes_received_hi,
  3756. qstats->total_bytes_received_hi,
  3757. fstats->total_bytes_received_lo,
  3758. qstats->total_bytes_received_lo);
  3759. ADD_64(fstats->total_bytes_transmitted_hi,
  3760. qstats->total_bytes_transmitted_hi,
  3761. fstats->total_bytes_transmitted_lo,
  3762. qstats->total_bytes_transmitted_lo);
  3763. ADD_64(fstats->total_unicast_packets_received_hi,
  3764. qstats->total_unicast_packets_received_hi,
  3765. fstats->total_unicast_packets_received_lo,
  3766. qstats->total_unicast_packets_received_lo);
  3767. ADD_64(fstats->total_multicast_packets_received_hi,
  3768. qstats->total_multicast_packets_received_hi,
  3769. fstats->total_multicast_packets_received_lo,
  3770. qstats->total_multicast_packets_received_lo);
  3771. ADD_64(fstats->total_broadcast_packets_received_hi,
  3772. qstats->total_broadcast_packets_received_hi,
  3773. fstats->total_broadcast_packets_received_lo,
  3774. qstats->total_broadcast_packets_received_lo);
  3775. ADD_64(fstats->total_unicast_packets_transmitted_hi,
  3776. qstats->total_unicast_packets_transmitted_hi,
  3777. fstats->total_unicast_packets_transmitted_lo,
  3778. qstats->total_unicast_packets_transmitted_lo);
  3779. ADD_64(fstats->total_multicast_packets_transmitted_hi,
  3780. qstats->total_multicast_packets_transmitted_hi,
  3781. fstats->total_multicast_packets_transmitted_lo,
  3782. qstats->total_multicast_packets_transmitted_lo);
  3783. ADD_64(fstats->total_broadcast_packets_transmitted_hi,
  3784. qstats->total_broadcast_packets_transmitted_hi,
  3785. fstats->total_broadcast_packets_transmitted_lo,
  3786. qstats->total_broadcast_packets_transmitted_lo);
  3787. ADD_64(fstats->valid_bytes_received_hi,
  3788. qstats->valid_bytes_received_hi,
  3789. fstats->valid_bytes_received_lo,
  3790. qstats->valid_bytes_received_lo);
  3791. ADD_64(estats->error_bytes_received_hi,
  3792. qstats->error_bytes_received_hi,
  3793. estats->error_bytes_received_lo,
  3794. qstats->error_bytes_received_lo);
  3795. ADD_64(estats->etherstatsoverrsizepkts_hi,
  3796. qstats->etherstatsoverrsizepkts_hi,
  3797. estats->etherstatsoverrsizepkts_lo,
  3798. qstats->etherstatsoverrsizepkts_lo);
  3799. ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
  3800. estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
  3801. }
  3802. ADD_64(fstats->total_bytes_received_hi,
  3803. estats->rx_stat_ifhcinbadoctets_hi,
  3804. fstats->total_bytes_received_lo,
  3805. estats->rx_stat_ifhcinbadoctets_lo);
  3806. memcpy(estats, &(fstats->total_bytes_received_hi),
  3807. sizeof(struct host_func_stats) - 2*sizeof(u32));
  3808. ADD_64(estats->etherstatsoverrsizepkts_hi,
  3809. estats->rx_stat_dot3statsframestoolong_hi,
  3810. estats->etherstatsoverrsizepkts_lo,
  3811. estats->rx_stat_dot3statsframestoolong_lo);
  3812. ADD_64(estats->error_bytes_received_hi,
  3813. estats->rx_stat_ifhcinbadoctets_hi,
  3814. estats->error_bytes_received_lo,
  3815. estats->rx_stat_ifhcinbadoctets_lo);
  3816. if (bp->port.pmf) {
  3817. estats->mac_filter_discard =
  3818. le32_to_cpu(tport->mac_filter_discard);
  3819. estats->xxoverflow_discard =
  3820. le32_to_cpu(tport->xxoverflow_discard);
  3821. estats->brb_truncate_discard =
  3822. le32_to_cpu(tport->brb_truncate_discard);
  3823. estats->mac_discard = le32_to_cpu(tport->mac_discard);
  3824. }
  3825. fstats->host_func_stats_start = ++fstats->host_func_stats_end;
  3826. bp->stats_pending = 0;
  3827. return 0;
  3828. }
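/* Translate the accumulated driver statistics into the standard
 * struct net_device_stats counters exposed to the network stack.
 */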
  3829. static void bnx2x_net_stats_update(struct bnx2x *bp)
  3830. {
  3831. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3832. struct net_device_stats *nstats = &bp->dev->stats;
  3833. int i;
  3834. nstats->rx_packets =
  3835. bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
  3836. bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
  3837. bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
  3838. nstats->tx_packets =
  3839. bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
  3840. bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
  3841. bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
  3842. nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
  3843. nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
  3844. nstats->rx_dropped = estats->mac_discard;
  3845. for_each_queue(bp, i)
  3846. nstats->rx_dropped +=
  3847. le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
  3848. nstats->tx_dropped = 0;
  3849. nstats->multicast =
  3850. bnx2x_hilo(&estats->total_multicast_packets_received_hi);
  3851. nstats->collisions =
  3852. bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
  3853. nstats->rx_length_errors =
  3854. bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
  3855. bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
  3856. nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
  3857. bnx2x_hilo(&estats->brb_truncate_hi);
  3858. nstats->rx_crc_errors =
  3859. bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
  3860. nstats->rx_frame_errors =
  3861. bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
  3862. nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
  3863. nstats->rx_missed_errors = estats->xxoverflow_discard;
  3864. nstats->rx_errors = nstats->rx_length_errors +
  3865. nstats->rx_over_errors +
  3866. nstats->rx_crc_errors +
  3867. nstats->rx_frame_errors +
  3868. nstats->rx_fifo_errors +
  3869. nstats->rx_missed_errors;
  3870. nstats->tx_aborted_errors =
  3871. bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
  3872. bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
  3873. nstats->tx_carrier_errors =
  3874. bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
  3875. nstats->tx_fifo_errors = 0;
  3876. nstats->tx_heartbeat_errors = 0;
  3877. nstats->tx_window_errors = 0;
  3878. nstats->tx_errors = nstats->tx_aborted_errors +
  3879. nstats->tx_carrier_errors +
  3880. bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
  3881. }
  3882. static void bnx2x_drv_stats_update(struct bnx2x *bp)
  3883. {
  3884. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3885. int i;
  3886. estats->driver_xoff = 0;
  3887. estats->rx_err_discard_pkt = 0;
  3888. estats->rx_skb_alloc_failed = 0;
  3889. estats->hw_csum_err = 0;
  3890. for_each_queue(bp, i) {
  3891. struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
  3892. estats->driver_xoff += qstats->driver_xoff;
  3893. estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
  3894. estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
  3895. estats->hw_csum_err += qstats->hw_csum_err;
  3896. }
  3897. }
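/* STATS_EVENT_UPDATE handler while statistics are enabled: do nothing until
 * the DMAE chain has completed, fold in HW stats (PMF only) and storm stats,
 * panic if the storms failed to answer three updates in a row, refresh the
 * netdev and driver counters, optionally dump per-queue debug info, and
 * finally start the next HW + storm statistics cycle.
 */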
  3898. static void bnx2x_stats_update(struct bnx2x *bp)
  3899. {
  3900. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3901. if (*stats_comp != DMAE_COMP_VAL)
  3902. return;
  3903. if (bp->port.pmf)
  3904. bnx2x_hw_stats_update(bp);
  3905. if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
  3906. BNX2X_ERR("storm stats were not updated for 3 times\n");
  3907. bnx2x_panic();
  3908. return;
  3909. }
  3910. bnx2x_net_stats_update(bp);
  3911. bnx2x_drv_stats_update(bp);
  3912. if (netif_msg_timer(bp)) {
  3913. struct bnx2x_eth_stats *estats = &bp->eth_stats;
  3914. int i;
  3915. printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
  3916. bp->dev->name,
  3917. estats->brb_drop_lo, estats->brb_truncate_lo);
  3918. for_each_queue(bp, i) {
  3919. struct bnx2x_fastpath *fp = &bp->fp[i];
  3920. struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
  3921. printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
  3922. " rx pkt(%lu) rx calls(%lu %lu)\n",
  3923. fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
  3924. fp->rx_comp_cons),
  3925. le16_to_cpu(*fp->rx_cons_sb),
  3926. bnx2x_hilo(&qstats->
  3927. total_unicast_packets_received_hi),
  3928. fp->rx_calls, fp->rx_pkt);
  3929. }
  3930. for_each_queue(bp, i) {
  3931. struct bnx2x_fastpath *fp = &bp->fp[i];
  3932. struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
  3933. struct netdev_queue *txq =
  3934. netdev_get_tx_queue(bp->dev, i);
  3935. printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
  3936. " tx pkt(%lu) tx calls (%lu)"
  3937. " %s (Xoff events %u)\n",
  3938. fp->name, bnx2x_tx_avail(fp),
  3939. le16_to_cpu(*fp->tx_cons_sb),
  3940. bnx2x_hilo(&qstats->
  3941. total_unicast_packets_transmitted_hi),
  3942. fp->tx_pkt,
  3943. (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
  3944. qstats->driver_xoff);
  3945. }
  3946. }
  3947. bnx2x_hw_stats_post(bp);
  3948. bnx2x_storm_stats_post(bp);
  3949. }
  3950. static void bnx2x_port_stats_stop(struct bnx2x *bp)
  3951. {
  3952. struct dmae_command *dmae;
  3953. u32 opcode;
  3954. int loader_idx = PMF_DMAE_C(bp);
  3955. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  3956. bp->executer_idx = 0;
  3957. opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  3958. DMAE_CMD_C_ENABLE |
  3959. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  3960. #ifdef __BIG_ENDIAN
  3961. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  3962. #else
  3963. DMAE_CMD_ENDIANITY_DW_SWAP |
  3964. #endif
  3965. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  3966. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  3967. if (bp->port.port_stx) {
  3968. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3969. if (bp->func_stx)
  3970. dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
  3971. else
  3972. dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
  3973. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
  3974. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
  3975. dmae->dst_addr_lo = bp->port.port_stx >> 2;
  3976. dmae->dst_addr_hi = 0;
  3977. dmae->len = sizeof(struct host_port_stats) >> 2;
  3978. if (bp->func_stx) {
  3979. dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
  3980. dmae->comp_addr_hi = 0;
  3981. dmae->comp_val = 1;
  3982. } else {
  3983. dmae->comp_addr_lo =
  3984. U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  3985. dmae->comp_addr_hi =
  3986. U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  3987. dmae->comp_val = DMAE_COMP_VAL;
  3988. *stats_comp = 0;
  3989. }
  3990. }
  3991. if (bp->func_stx) {
  3992. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  3993. dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
  3994. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
  3995. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
  3996. dmae->dst_addr_lo = bp->func_stx >> 2;
  3997. dmae->dst_addr_hi = 0;
  3998. dmae->len = sizeof(struct host_func_stats) >> 2;
  3999. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  4000. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  4001. dmae->comp_val = DMAE_COMP_VAL;
  4002. *stats_comp = 0;
  4003. }
  4004. }
  4005. static void bnx2x_stats_stop(struct bnx2x *bp)
  4006. {
  4007. int update = 0;
  4008. bnx2x_stats_comp(bp);
  4009. if (bp->port.pmf)
  4010. update = (bnx2x_hw_stats_update(bp) == 0);
  4011. update |= (bnx2x_storm_stats_update(bp) == 0);
  4012. if (update) {
  4013. bnx2x_net_stats_update(bp);
  4014. if (bp->port.pmf)
  4015. bnx2x_port_stats_stop(bp);
  4016. bnx2x_hw_stats_post(bp);
  4017. bnx2x_stats_comp(bp);
  4018. }
  4019. }
  4020. static void bnx2x_stats_do_nothing(struct bnx2x *bp)
  4021. {
  4022. }
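/* Statistics state machine: indexed by current state (DISABLED/ENABLED) and
 * event (PMF, LINK_UP, UPDATE, STOP).  Each entry names the action to run and
 * the state to move to; bnx2x_stats_handle() performs the transition under
 * stats_lock.
 */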
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
  4041. static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
  4042. {
  4043. enum bnx2x_stats_state state;
  4044. if (unlikely(bp->panic))
  4045. return;
  4046. /* Protect a state change flow */
  4047. spin_lock_bh(&bp->stats_lock);
  4048. state = bp->stats_state;
  4049. bp->stats_state = bnx2x_stats_stm[state][event].next_state;
  4050. spin_unlock_bh(&bp->stats_lock);
  4051. bnx2x_stats_stm[state][event].action(bp);
  4052. if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
  4053. DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
  4054. state, event, bp->stats_state);
  4055. }
  4056. static void bnx2x_port_stats_base_init(struct bnx2x *bp)
  4057. {
  4058. struct dmae_command *dmae;
  4059. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  4060. /* sanity */
  4061. if (!bp->port.pmf || !bp->port.port_stx) {
  4062. BNX2X_ERR("BUG!\n");
  4063. return;
  4064. }
  4065. bp->executer_idx = 0;
  4066. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  4067. dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
  4068. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  4069. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  4070. #ifdef __BIG_ENDIAN
  4071. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  4072. #else
  4073. DMAE_CMD_ENDIANITY_DW_SWAP |
  4074. #endif
  4075. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  4076. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  4077. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
  4078. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
  4079. dmae->dst_addr_lo = bp->port.port_stx >> 2;
  4080. dmae->dst_addr_hi = 0;
  4081. dmae->len = sizeof(struct host_port_stats) >> 2;
  4082. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  4083. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  4084. dmae->comp_val = DMAE_COMP_VAL;
  4085. *stats_comp = 0;
  4086. bnx2x_hw_stats_post(bp);
  4087. bnx2x_stats_comp(bp);
  4088. }
  4089. static void bnx2x_func_stats_base_init(struct bnx2x *bp)
  4090. {
  4091. int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
  4092. int port = BP_PORT(bp);
  4093. int func;
  4094. u32 func_stx;
  4095. /* sanity */
  4096. if (!bp->port.pmf || !bp->func_stx) {
  4097. BNX2X_ERR("BUG!\n");
  4098. return;
  4099. }
  4100. /* save our func_stx */
  4101. func_stx = bp->func_stx;
  4102. for (vn = VN_0; vn < vn_max; vn++) {
  4103. func = 2*vn + port;
  4104. bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
  4105. bnx2x_func_stats_init(bp);
  4106. bnx2x_hw_stats_post(bp);
  4107. bnx2x_stats_comp(bp);
  4108. }
  4109. /* restore our func_stx */
  4110. bp->func_stx = func_stx;
  4111. }
  4112. static void bnx2x_func_stats_base_update(struct bnx2x *bp)
  4113. {
  4114. struct dmae_command *dmae = &bp->stats_dmae;
  4115. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  4116. /* sanity */
  4117. if (!bp->func_stx) {
  4118. BNX2X_ERR("BUG!\n");
  4119. return;
  4120. }
  4121. bp->executer_idx = 0;
  4122. memset(dmae, 0, sizeof(struct dmae_command));
  4123. dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
  4124. DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
  4125. DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
  4126. #ifdef __BIG_ENDIAN
  4127. DMAE_CMD_ENDIANITY_B_DW_SWAP |
  4128. #else
  4129. DMAE_CMD_ENDIANITY_DW_SWAP |
  4130. #endif
  4131. (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
  4132. (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
  4133. dmae->src_addr_lo = bp->func_stx >> 2;
  4134. dmae->src_addr_hi = 0;
  4135. dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
  4136. dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
  4137. dmae->len = sizeof(struct host_func_stats) >> 2;
  4138. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  4139. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  4140. dmae->comp_val = DMAE_COMP_VAL;
  4141. *stats_comp = 0;
  4142. bnx2x_hw_stats_post(bp);
  4143. bnx2x_stats_comp(bp);
  4144. }
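/* Initial statistics setup: reset the bookkeeping counters, look up the
 * port/function stats addresses in shmem (unless running without an MCP),
 * snapshot the NIG counters as the initial baseline, zero all per-queue and
 * device counters, and seed or fetch the shmem base statistics depending on
 * whether this function is the PMF.
 */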
  4145. static void bnx2x_stats_init(struct bnx2x *bp)
  4146. {
  4147. int port = BP_PORT(bp);
  4148. int func = BP_FUNC(bp);
  4149. int i;
  4150. bp->stats_pending = 0;
  4151. bp->executer_idx = 0;
  4152. bp->stats_counter = 0;
  4153. /* port and func stats for management */
  4154. if (!BP_NOMCP(bp)) {
  4155. bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
  4156. bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
  4157. } else {
  4158. bp->port.port_stx = 0;
  4159. bp->func_stx = 0;
  4160. }
  4161. DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
  4162. bp->port.port_stx, bp->func_stx);
  4163. /* port stats */
  4164. memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
  4165. bp->port.old_nig_stats.brb_discard =
  4166. REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
  4167. bp->port.old_nig_stats.brb_truncate =
  4168. REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
  4169. REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
  4170. &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
  4171. REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
  4172. &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
  4173. /* function stats */
  4174. for_each_queue(bp, i) {
  4175. struct bnx2x_fastpath *fp = &bp->fp[i];
  4176. memset(&fp->old_tclient, 0,
  4177. sizeof(struct tstorm_per_client_stats));
  4178. memset(&fp->old_uclient, 0,
  4179. sizeof(struct ustorm_per_client_stats));
  4180. memset(&fp->old_xclient, 0,
  4181. sizeof(struct xstorm_per_client_stats));
  4182. memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
  4183. }
  4184. memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
  4185. memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
  4186. bp->stats_state = STATS_STATE_DISABLED;
  4187. if (bp->port.pmf) {
  4188. if (bp->port.port_stx)
  4189. bnx2x_port_stats_base_init(bp);
  4190. if (bp->func_stx)
  4191. bnx2x_func_stats_base_init(bp);
  4192. } else if (bp->func_stx)
  4193. bnx2x_func_stats_base_update(bp);
  4194. }
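/* Periodic driver timer: returns immediately when the interface is not
 * running; while interrupts are disabled it only re-arms itself.  Otherwise
 * it services queue 0 directly in poll mode, maintains the driver heartbeat
 * pulse towards the MCP (warning if driver and MCP pulses drift by more than
 * one beat) and triggers a statistics update while the device is open.
 */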
  4195. static void bnx2x_timer(unsigned long data)
  4196. {
  4197. struct bnx2x *bp = (struct bnx2x *) data;
  4198. if (!netif_running(bp->dev))
  4199. return;
  4200. if (atomic_read(&bp->intr_sem) != 0)
  4201. goto timer_restart;
  4202. if (poll) {
  4203. struct bnx2x_fastpath *fp = &bp->fp[0];
  4204. int rc;
  4205. bnx2x_tx_int(fp);
  4206. rc = bnx2x_rx_int(fp, 1000);
  4207. }
  4208. if (!BP_NOMCP(bp)) {
  4209. int func = BP_FUNC(bp);
  4210. u32 drv_pulse;
  4211. u32 mcp_pulse;
  4212. ++bp->fw_drv_pulse_wr_seq;
  4213. bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
  4214. /* TBD - add SYSTEM_TIME */
  4215. drv_pulse = bp->fw_drv_pulse_wr_seq;
  4216. SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
  4217. mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
  4218. MCP_PULSE_SEQ_MASK);
  4219. /* The delta between driver pulse and mcp response
  4220. * should be 1 (before mcp response) or 0 (after mcp response)
  4221. */
  4222. if ((drv_pulse != mcp_pulse) &&
  4223. (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
  4224. /* someone lost a heartbeat... */
  4225. BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
  4226. drv_pulse, mcp_pulse);
  4227. }
  4228. }
  4229. if (bp->state == BNX2X_STATE_OPEN)
  4230. bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
  4231. timer_restart:
  4232. mod_timer(&bp->timer, jiffies + bp->current_interval);
  4233. }
  4234. /* end of Statistics */
  4235. /* nic init */
  4236. /*
  4237. * nic init service functions
  4238. */
  4239. static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
  4240. {
  4241. int port = BP_PORT(bp);
  4242. /* "CSTORM" */
  4243. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
  4244. CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
  4245. CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
  4246. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
  4247. CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
  4248. CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
  4249. }
  4250. static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
  4251. dma_addr_t mapping, int sb_id)
  4252. {
  4253. int port = BP_PORT(bp);
  4254. int func = BP_FUNC(bp);
  4255. int index;
  4256. u64 section;
  4257. /* USTORM */
  4258. section = ((u64)mapping) + offsetof(struct host_status_block,
  4259. u_status_block);
  4260. sb->u_status_block.status_block_id = sb_id;
  4261. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4262. CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
  4263. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4264. ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
  4265. U64_HI(section));
  4266. REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
  4267. CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
  4268. for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
  4269. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4270. CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
  4271. /* CSTORM */
  4272. section = ((u64)mapping) + offsetof(struct host_status_block,
  4273. c_status_block);
  4274. sb->c_status_block.status_block_id = sb_id;
  4275. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4276. CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
  4277. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4278. ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
  4279. U64_HI(section));
  4280. REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
  4281. CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
  4282. for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
  4283. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4284. CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
  4285. bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
  4286. }
  4287. static void bnx2x_zero_def_sb(struct bnx2x *bp)
  4288. {
  4289. int func = BP_FUNC(bp);
  4290. bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
  4291. TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
  4292. sizeof(struct tstorm_def_status_block)/4);
  4293. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
  4294. CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
  4295. sizeof(struct cstorm_def_status_block_u)/4);
  4296. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
  4297. CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
  4298. sizeof(struct cstorm_def_status_block_c)/4);
  4299. bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
  4300. XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
  4301. sizeof(struct xstorm_def_status_block)/4);
  4302. }
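/* Set up the default (slow path) status block: latch the AEU attention
 * group enable bits, register the attention section address with the HC,
 * and hook the per-storm default sections (U/C/T/X) to their host
 * addresses with coalescing initially disabled. */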
  4303. static void bnx2x_init_def_sb(struct bnx2x *bp,
  4304. struct host_def_status_block *def_sb,
  4305. dma_addr_t mapping, int sb_id)
  4306. {
  4307. int port = BP_PORT(bp);
  4308. int func = BP_FUNC(bp);
  4309. int index, val, reg_offset;
  4310. u64 section;
  4311. /* ATTN */
  4312. section = ((u64)mapping) + offsetof(struct host_def_status_block,
  4313. atten_status_block);
  4314. def_sb->atten_status_block.status_block_id = sb_id;
  4315. bp->attn_state = 0;
  4316. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  4317. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  4318. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  4319. bp->attn_group[index].sig[0] = REG_RD(bp,
  4320. reg_offset + 0x10*index);
  4321. bp->attn_group[index].sig[1] = REG_RD(bp,
  4322. reg_offset + 0x4 + 0x10*index);
  4323. bp->attn_group[index].sig[2] = REG_RD(bp,
  4324. reg_offset + 0x8 + 0x10*index);
  4325. bp->attn_group[index].sig[3] = REG_RD(bp,
  4326. reg_offset + 0xc + 0x10*index);
  4327. }
  4328. reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
  4329. HC_REG_ATTN_MSG0_ADDR_L);
  4330. REG_WR(bp, reg_offset, U64_LO(section));
  4331. REG_WR(bp, reg_offset + 4, U64_HI(section));
  4332. reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
  4333. val = REG_RD(bp, reg_offset);
  4334. val |= sb_id;
  4335. REG_WR(bp, reg_offset, val);
  4336. /* USTORM */
  4337. section = ((u64)mapping) + offsetof(struct host_def_status_block,
  4338. u_def_status_block);
  4339. def_sb->u_def_status_block.status_block_id = sb_id;
  4340. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4341. CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
  4342. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4343. ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
  4344. U64_HI(section));
  4345. REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
  4346. CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
  4347. for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
  4348. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4349. CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
  4350. /* CSTORM */
  4351. section = ((u64)mapping) + offsetof(struct host_def_status_block,
  4352. c_def_status_block);
  4353. def_sb->c_def_status_block.status_block_id = sb_id;
  4354. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4355. CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
  4356. REG_WR(bp, BAR_CSTRORM_INTMEM +
  4357. ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
  4358. U64_HI(section));
  4359. REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
  4360. CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
  4361. for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
  4362. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4363. CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
  4364. /* TSTORM */
  4365. section = ((u64)mapping) + offsetof(struct host_def_status_block,
  4366. t_def_status_block);
  4367. def_sb->t_def_status_block.status_block_id = sb_id;
  4368. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4369. TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
  4370. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4371. ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
  4372. U64_HI(section));
  4373. REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
  4374. TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
  4375. for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
  4376. REG_WR16(bp, BAR_TSTRORM_INTMEM +
  4377. TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
  4378. /* XSTORM */
  4379. section = ((u64)mapping) + offsetof(struct host_def_status_block,
  4380. x_def_status_block);
  4381. def_sb->x_def_status_block.status_block_id = sb_id;
  4382. REG_WR(bp, BAR_XSTRORM_INTMEM +
  4383. XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
  4384. REG_WR(bp, BAR_XSTRORM_INTMEM +
  4385. ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
  4386. U64_HI(section));
  4387. REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
  4388. XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
  4389. for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
  4390. REG_WR16(bp, BAR_XSTRORM_INTMEM +
  4391. XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
  4392. bp->stats_pending = 0;
  4393. bp->set_mac_pending = 0;
  4394. bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
  4395. }
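/* Program interrupt coalescing: the timeout written for each index is the
 * rx/tx tick count divided by (4 * BNX2X_BTR); when that works out to 0,
 * host coalescing is disabled for the index instead. */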
  4396. static void bnx2x_update_coalesce(struct bnx2x *bp)
  4397. {
  4398. int port = BP_PORT(bp);
  4399. int i;
  4400. for_each_queue(bp, i) {
  4401. int sb_id = bp->fp[i].sb_id;
  4402. /* HC_INDEX_U_ETH_RX_CQ_CONS */
  4403. REG_WR8(bp, BAR_CSTRORM_INTMEM +
  4404. CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
  4405. U_SB_ETH_RX_CQ_INDEX),
  4406. bp->rx_ticks/(4 * BNX2X_BTR));
  4407. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4408. CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
  4409. U_SB_ETH_RX_CQ_INDEX),
  4410. (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
  4411. /* HC_INDEX_C_ETH_TX_CQ_CONS */
  4412. REG_WR8(bp, BAR_CSTRORM_INTMEM +
  4413. CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
  4414. C_SB_ETH_TX_CQ_INDEX),
  4415. bp->tx_ticks/(4 * BNX2X_BTR));
  4416. REG_WR16(bp, BAR_CSTRORM_INTMEM +
  4417. CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
  4418. C_SB_ETH_TX_CQ_INDEX),
  4419. (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
  4420. }
  4421. }
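/* Release the first 'last' TPA bins of a queue: unmap the DMA buffer only
 * for bins still in the START state and free the preallocated skbs. */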
  4422. static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
  4423. struct bnx2x_fastpath *fp, int last)
  4424. {
  4425. int i;
  4426. for (i = 0; i < last; i++) {
  4427. struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
  4428. struct sk_buff *skb = rx_buf->skb;
  4429. if (skb == NULL) {
  4430. DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
  4431. continue;
  4432. }
  4433. if (fp->tpa_state[i] == BNX2X_TPA_START)
  4434. dma_unmap_single(&bp->pdev->dev,
  4435. dma_unmap_addr(rx_buf, mapping),
  4436. bp->rx_buf_size, DMA_FROM_DEVICE);
  4437. dev_kfree_skb(skb);
  4438. rx_buf->skb = NULL;
  4439. }
  4440. }
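/* Build the per-queue Rx rings (SGE, BD and CQE). The final descriptors of
 * every ring page are reserved as "next page" links, which is why the
 * initialization loops below stop short of a full page. */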
  4441. static void bnx2x_init_rx_rings(struct bnx2x *bp)
  4442. {
  4443. int func = BP_FUNC(bp);
  4444. int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
  4445. ETH_MAX_AGGREGATION_QUEUES_E1H;
  4446. u16 ring_prod, cqe_ring_prod;
  4447. int i, j;
  4448. bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
  4449. DP(NETIF_MSG_IFUP,
  4450. "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
  4451. if (bp->flags & TPA_ENABLE_FLAG) {
  4452. for_each_queue(bp, j) {
  4453. struct bnx2x_fastpath *fp = &bp->fp[j];
  4454. for (i = 0; i < max_agg_queues; i++) {
  4455. fp->tpa_pool[i].skb =
  4456. netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  4457. if (!fp->tpa_pool[i].skb) {
  4458. BNX2X_ERR("Failed to allocate TPA "
  4459. "skb pool for queue[%d] - "
  4460. "disabling TPA on this "
  4461. "queue!\n", j);
  4462. bnx2x_free_tpa_pool(bp, fp, i);
  4463. fp->disable_tpa = 1;
  4464. break;
  4465. }
  4466. dma_unmap_addr_set((struct sw_rx_bd *)
  4467. &bp->fp->tpa_pool[i],
  4468. mapping, 0);
  4469. fp->tpa_state[i] = BNX2X_TPA_STOP;
  4470. }
  4471. }
  4472. }
  4473. for_each_queue(bp, j) {
  4474. struct bnx2x_fastpath *fp = &bp->fp[j];
  4475. fp->rx_bd_cons = 0;
  4476. fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
  4477. fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
  4478. /* "next page" elements initialization */
  4479. /* SGE ring */
  4480. for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
  4481. struct eth_rx_sge *sge;
  4482. sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
  4483. sge->addr_hi =
  4484. cpu_to_le32(U64_HI(fp->rx_sge_mapping +
  4485. BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
  4486. sge->addr_lo =
  4487. cpu_to_le32(U64_LO(fp->rx_sge_mapping +
  4488. BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
  4489. }
  4490. bnx2x_init_sge_ring_bit_mask(fp);
  4491. /* RX BD ring */
  4492. for (i = 1; i <= NUM_RX_RINGS; i++) {
  4493. struct eth_rx_bd *rx_bd;
  4494. rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
  4495. rx_bd->addr_hi =
  4496. cpu_to_le32(U64_HI(fp->rx_desc_mapping +
  4497. BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
  4498. rx_bd->addr_lo =
  4499. cpu_to_le32(U64_LO(fp->rx_desc_mapping +
  4500. BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
  4501. }
  4502. /* CQ ring */
  4503. for (i = 1; i <= NUM_RCQ_RINGS; i++) {
  4504. struct eth_rx_cqe_next_page *nextpg;
  4505. nextpg = (struct eth_rx_cqe_next_page *)
  4506. &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
  4507. nextpg->addr_hi =
  4508. cpu_to_le32(U64_HI(fp->rx_comp_mapping +
  4509. BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
  4510. nextpg->addr_lo =
  4511. cpu_to_le32(U64_LO(fp->rx_comp_mapping +
  4512. BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
  4513. }
  4514. /* Allocate SGEs and initialize the ring elements */
  4515. for (i = 0, ring_prod = 0;
  4516. i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
  4517. if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
  4518. BNX2X_ERR("was only able to allocate "
  4519. "%d rx sges\n", i);
  4520. BNX2X_ERR("disabling TPA for queue[%d]\n", j);
  4521. /* Cleanup already allocated elements */
  4522. bnx2x_free_rx_sge_range(bp, fp, ring_prod);
  4523. bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
  4524. fp->disable_tpa = 1;
  4525. ring_prod = 0;
  4526. break;
  4527. }
  4528. ring_prod = NEXT_SGE_IDX(ring_prod);
  4529. }
  4530. fp->rx_sge_prod = ring_prod;
  4531. /* Allocate BDs and initialize BD ring */
  4532. fp->rx_comp_cons = 0;
  4533. cqe_ring_prod = ring_prod = 0;
  4534. for (i = 0; i < bp->rx_ring_size; i++) {
  4535. if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
  4536. BNX2X_ERR("was only able to allocate "
  4537. "%d rx skbs on queue[%d]\n", i, j);
  4538. fp->eth_q_stats.rx_skb_alloc_failed++;
  4539. break;
  4540. }
  4541. ring_prod = NEXT_RX_IDX(ring_prod);
  4542. cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
  4543. WARN_ON(ring_prod <= i);
  4544. }
  4545. fp->rx_bd_prod = ring_prod;
  4546. /* must not have more available CQEs than BDs */
  4547. fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
  4548. cqe_ring_prod);
  4549. fp->rx_pkt = fp->rx_calls = 0;
4550. /* Warning!
4551. * This will generate an interrupt (to the TSTORM) and must only
4552. * be done after the chip has been initialized.
4553. */
  4554. bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
  4555. fp->rx_sge_prod);
  4556. if (j != 0)
  4557. continue;
  4558. REG_WR(bp, BAR_USTRORM_INTMEM +
  4559. USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
  4560. U64_LO(fp->rx_comp_mapping));
  4561. REG_WR(bp, BAR_USTRORM_INTMEM +
  4562. USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
  4563. U64_HI(fp->rx_comp_mapping));
  4564. }
  4565. }
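/* Chain the Tx BD pages together (the last BD of each page is a next-page
 * pointer) and reset the doorbell data and producer/consumer indices. */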
  4566. static void bnx2x_init_tx_ring(struct bnx2x *bp)
  4567. {
  4568. int i, j;
  4569. for_each_queue(bp, j) {
  4570. struct bnx2x_fastpath *fp = &bp->fp[j];
  4571. for (i = 1; i <= NUM_TX_RINGS; i++) {
  4572. struct eth_tx_next_bd *tx_next_bd =
  4573. &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
  4574. tx_next_bd->addr_hi =
  4575. cpu_to_le32(U64_HI(fp->tx_desc_mapping +
  4576. BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
  4577. tx_next_bd->addr_lo =
  4578. cpu_to_le32(U64_LO(fp->tx_desc_mapping +
  4579. BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
  4580. }
  4581. fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
  4582. fp->tx_db.data.zero_fill1 = 0;
  4583. fp->tx_db.data.prod = 0;
  4584. fp->tx_pkt_prod = 0;
  4585. fp->tx_pkt_cons = 0;
  4586. fp->tx_bd_prod = 0;
  4587. fp->tx_bd_cons = 0;
  4588. fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
  4589. fp->tx_pkt = 0;
  4590. }
  4591. }
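/* Initialize the slow path queue (SPQ) and publish its page base address
 * and initial producer index to the XSTORM. */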
  4592. static void bnx2x_init_sp_ring(struct bnx2x *bp)
  4593. {
  4594. int func = BP_FUNC(bp);
  4595. spin_lock_init(&bp->spq_lock);
  4596. bp->spq_left = MAX_SPQ_PENDING;
  4597. bp->spq_prod_idx = 0;
  4598. bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
  4599. bp->spq_prod_bd = bp->spq;
  4600. bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
  4601. REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
  4602. U64_LO(bp->spq_mapping));
  4603. REG_WR(bp,
  4604. XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
  4605. U64_HI(bp->spq_mapping));
  4606. REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
  4607. bp->spq_prod_idx);
  4608. }
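/* Fill the ETH connection context of every queue: the USTORM side gets the
 * Rx BD/SGE page bases, client id and TPA settings, while the CSTORM and
 * XSTORM side gets the Tx completion index and Tx BD page base. */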
  4609. static void bnx2x_init_context(struct bnx2x *bp)
  4610. {
  4611. int i;
  4612. /* Rx */
  4613. for_each_queue(bp, i) {
  4614. struct eth_context *context = bnx2x_sp(bp, context[i].eth);
  4615. struct bnx2x_fastpath *fp = &bp->fp[i];
  4616. u8 cl_id = fp->cl_id;
  4617. context->ustorm_st_context.common.sb_index_numbers =
  4618. BNX2X_RX_SB_INDEX_NUM;
  4619. context->ustorm_st_context.common.clientId = cl_id;
  4620. context->ustorm_st_context.common.status_block_id = fp->sb_id;
  4621. context->ustorm_st_context.common.flags =
  4622. (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
  4623. USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
  4624. context->ustorm_st_context.common.statistics_counter_id =
  4625. cl_id;
  4626. context->ustorm_st_context.common.mc_alignment_log_size =
  4627. BNX2X_RX_ALIGN_SHIFT;
  4628. context->ustorm_st_context.common.bd_buff_size =
  4629. bp->rx_buf_size;
  4630. context->ustorm_st_context.common.bd_page_base_hi =
  4631. U64_HI(fp->rx_desc_mapping);
  4632. context->ustorm_st_context.common.bd_page_base_lo =
  4633. U64_LO(fp->rx_desc_mapping);
  4634. if (!fp->disable_tpa) {
  4635. context->ustorm_st_context.common.flags |=
  4636. USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
  4637. context->ustorm_st_context.common.sge_buff_size =
  4638. (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
  4639. 0xffff);
  4640. context->ustorm_st_context.common.sge_page_base_hi =
  4641. U64_HI(fp->rx_sge_mapping);
  4642. context->ustorm_st_context.common.sge_page_base_lo =
  4643. U64_LO(fp->rx_sge_mapping);
  4644. context->ustorm_st_context.common.max_sges_for_packet =
  4645. SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
  4646. context->ustorm_st_context.common.max_sges_for_packet =
  4647. ((context->ustorm_st_context.common.
  4648. max_sges_for_packet + PAGES_PER_SGE - 1) &
  4649. (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
  4650. }
  4651. context->ustorm_ag_context.cdu_usage =
  4652. CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
  4653. CDU_REGION_NUMBER_UCM_AG,
  4654. ETH_CONNECTION_TYPE);
  4655. context->xstorm_ag_context.cdu_reserved =
  4656. CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
  4657. CDU_REGION_NUMBER_XCM_AG,
  4658. ETH_CONNECTION_TYPE);
  4659. }
  4660. /* Tx */
  4661. for_each_queue(bp, i) {
  4662. struct bnx2x_fastpath *fp = &bp->fp[i];
  4663. struct eth_context *context =
  4664. bnx2x_sp(bp, context[i].eth);
  4665. context->cstorm_st_context.sb_index_number =
  4666. C_SB_ETH_TX_CQ_INDEX;
  4667. context->cstorm_st_context.status_block_id = fp->sb_id;
  4668. context->xstorm_st_context.tx_bd_page_base_hi =
  4669. U64_HI(fp->tx_desc_mapping);
  4670. context->xstorm_st_context.tx_bd_page_base_lo =
  4671. U64_LO(fp->tx_desc_mapping);
  4672. context->xstorm_st_context.statistics_data = (fp->cl_id |
  4673. XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
  4674. }
  4675. }
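/* Write the RSS indirection table into TSTORM memory, spreading its
 * entries round-robin over the client ids of the active queues. */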
  4676. static void bnx2x_init_ind_table(struct bnx2x *bp)
  4677. {
  4678. int func = BP_FUNC(bp);
  4679. int i;
  4680. if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
  4681. return;
  4682. DP(NETIF_MSG_IFUP,
  4683. "Initializing indirection table multi_mode %d\n", bp->multi_mode);
  4684. for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
  4685. REG_WR8(bp, BAR_TSTRORM_INTMEM +
  4686. TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
  4687. bp->fp->cl_id + (i % bp->num_queues));
  4688. }
  4689. static void bnx2x_set_client_config(struct bnx2x *bp)
  4690. {
  4691. struct tstorm_eth_client_config tstorm_client = {0};
  4692. int port = BP_PORT(bp);
  4693. int i;
  4694. tstorm_client.mtu = bp->dev->mtu;
  4695. tstorm_client.config_flags =
  4696. (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
  4697. TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
  4698. #ifdef BCM_VLAN
  4699. if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
  4700. tstorm_client.config_flags |=
  4701. TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
  4702. DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
  4703. }
  4704. #endif
  4705. for_each_queue(bp, i) {
  4706. tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
  4707. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4708. TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
  4709. ((u32 *)&tstorm_client)[0]);
  4710. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4711. TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
  4712. ((u32 *)&tstorm_client)[1]);
  4713. }
  4714. DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
  4715. ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
  4716. }
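/* Translate the current rx_mode into per-storm MAC filter masks (drop-all
 * or accept-all for unicast, multicast and broadcast) and the matching NIG
 * LLH mask, then push the client configuration unless Rx is disabled. */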
  4717. static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
  4718. {
  4719. struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
  4720. int mode = bp->rx_mode;
  4721. int mask = bp->rx_mode_cl_mask;
  4722. int func = BP_FUNC(bp);
  4723. int port = BP_PORT(bp);
  4724. int i;
  4725. /* All but management unicast packets should pass to the host as well */
  4726. u32 llh_mask =
  4727. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
  4728. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
  4729. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
  4730. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
  4731. DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
  4732. switch (mode) {
  4733. case BNX2X_RX_MODE_NONE: /* no Rx */
  4734. tstorm_mac_filter.ucast_drop_all = mask;
  4735. tstorm_mac_filter.mcast_drop_all = mask;
  4736. tstorm_mac_filter.bcast_drop_all = mask;
  4737. break;
  4738. case BNX2X_RX_MODE_NORMAL:
  4739. tstorm_mac_filter.bcast_accept_all = mask;
  4740. break;
  4741. case BNX2X_RX_MODE_ALLMULTI:
  4742. tstorm_mac_filter.mcast_accept_all = mask;
  4743. tstorm_mac_filter.bcast_accept_all = mask;
  4744. break;
  4745. case BNX2X_RX_MODE_PROMISC:
  4746. tstorm_mac_filter.ucast_accept_all = mask;
  4747. tstorm_mac_filter.mcast_accept_all = mask;
  4748. tstorm_mac_filter.bcast_accept_all = mask;
  4749. /* pass management unicast packets as well */
  4750. llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
  4751. break;
  4752. default:
  4753. BNX2X_ERR("BAD rx mode (%d)\n", mode);
  4754. break;
  4755. }
  4756. REG_WR(bp,
  4757. (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
  4758. llh_mask);
  4759. for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
  4760. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4761. TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
  4762. ((u32 *)&tstorm_mac_filter)[i]);
  4763. /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
  4764. ((u32 *)&tstorm_mac_filter)[i]); */
  4765. }
  4766. if (mode != BNX2X_RX_MODE_NONE)
  4767. bnx2x_set_client_config(bp);
  4768. }
  4769. static void bnx2x_init_internal_common(struct bnx2x *bp)
  4770. {
  4771. int i;
  4772. /* Zero this manually as its initialization is
  4773. currently missing in the initTool */
  4774. for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
  4775. REG_WR(bp, BAR_USTRORM_INTMEM +
  4776. USTORM_AGG_DATA_OFFSET + i * 4, 0);
  4777. }
  4778. static void bnx2x_init_internal_port(struct bnx2x *bp)
  4779. {
  4780. int port = BP_PORT(bp);
  4781. REG_WR(bp,
  4782. BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
  4783. REG_WR(bp,
  4784. BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
  4785. REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
  4786. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
  4787. }
  4788. static void bnx2x_init_internal_func(struct bnx2x *bp)
  4789. {
  4790. struct tstorm_eth_function_common_config tstorm_config = {0};
  4791. struct stats_indication_flags stats_flags = {0};
  4792. int port = BP_PORT(bp);
  4793. int func = BP_FUNC(bp);
  4794. int i, j;
  4795. u32 offset;
  4796. u16 max_agg_size;
  4797. tstorm_config.config_flags = RSS_FLAGS(bp);
  4798. if (is_multi(bp))
  4799. tstorm_config.rss_result_mask = MULTI_MASK;
  4800. /* Enable TPA if needed */
  4801. if (bp->flags & TPA_ENABLE_FLAG)
  4802. tstorm_config.config_flags |=
  4803. TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
  4804. if (IS_E1HMF(bp))
  4805. tstorm_config.config_flags |=
  4806. TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
  4807. tstorm_config.leading_client_id = BP_L_ID(bp);
  4808. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4809. TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
  4810. (*(u32 *)&tstorm_config));
  4811. bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
  4812. bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
  4813. bnx2x_set_storm_rx_mode(bp);
  4814. for_each_queue(bp, i) {
  4815. u8 cl_id = bp->fp[i].cl_id;
  4816. /* reset xstorm per client statistics */
  4817. offset = BAR_XSTRORM_INTMEM +
  4818. XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
  4819. for (j = 0;
  4820. j < sizeof(struct xstorm_per_client_stats) / 4; j++)
  4821. REG_WR(bp, offset + j*4, 0);
  4822. /* reset tstorm per client statistics */
  4823. offset = BAR_TSTRORM_INTMEM +
  4824. TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
  4825. for (j = 0;
  4826. j < sizeof(struct tstorm_per_client_stats) / 4; j++)
  4827. REG_WR(bp, offset + j*4, 0);
  4828. /* reset ustorm per client statistics */
  4829. offset = BAR_USTRORM_INTMEM +
  4830. USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
  4831. for (j = 0;
  4832. j < sizeof(struct ustorm_per_client_stats) / 4; j++)
  4833. REG_WR(bp, offset + j*4, 0);
  4834. }
  4835. /* Init statistics related context */
  4836. stats_flags.collect_eth = 1;
  4837. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
  4838. ((u32 *)&stats_flags)[0]);
  4839. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
  4840. ((u32 *)&stats_flags)[1]);
  4841. REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
  4842. ((u32 *)&stats_flags)[0]);
  4843. REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
  4844. ((u32 *)&stats_flags)[1]);
  4845. REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
  4846. ((u32 *)&stats_flags)[0]);
  4847. REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
  4848. ((u32 *)&stats_flags)[1]);
  4849. REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
  4850. ((u32 *)&stats_flags)[0]);
  4851. REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
  4852. ((u32 *)&stats_flags)[1]);
  4853. REG_WR(bp, BAR_XSTRORM_INTMEM +
  4854. XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
  4855. U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
  4856. REG_WR(bp, BAR_XSTRORM_INTMEM +
  4857. XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
  4858. U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
  4859. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4860. TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
  4861. U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
  4862. REG_WR(bp, BAR_TSTRORM_INTMEM +
  4863. TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
  4864. U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
  4865. REG_WR(bp, BAR_USTRORM_INTMEM +
  4866. USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
  4867. U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
  4868. REG_WR(bp, BAR_USTRORM_INTMEM +
  4869. USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
  4870. U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
  4871. if (CHIP_IS_E1H(bp)) {
  4872. REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
  4873. IS_E1HMF(bp));
  4874. REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
  4875. IS_E1HMF(bp));
  4876. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
  4877. IS_E1HMF(bp));
  4878. REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
  4879. IS_E1HMF(bp));
  4880. REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
  4881. bp->e1hov);
  4882. }
  4883. /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
  4884. max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
  4885. SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
  4886. for_each_queue(bp, i) {
  4887. struct bnx2x_fastpath *fp = &bp->fp[i];
  4888. REG_WR(bp, BAR_USTRORM_INTMEM +
  4889. USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
  4890. U64_LO(fp->rx_comp_mapping));
  4891. REG_WR(bp, BAR_USTRORM_INTMEM +
  4892. USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
  4893. U64_HI(fp->rx_comp_mapping));
  4894. /* Next page */
  4895. REG_WR(bp, BAR_USTRORM_INTMEM +
  4896. USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
  4897. U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
  4898. REG_WR(bp, BAR_USTRORM_INTMEM +
  4899. USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
  4900. U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
  4901. REG_WR16(bp, BAR_USTRORM_INTMEM +
  4902. USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
  4903. max_agg_size);
  4904. }
  4905. /* dropless flow control */
  4906. if (CHIP_IS_E1H(bp)) {
  4907. struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
  4908. rx_pause.bd_thr_low = 250;
  4909. rx_pause.cqe_thr_low = 250;
  4910. rx_pause.cos = 1;
  4911. rx_pause.sge_thr_low = 0;
  4912. rx_pause.bd_thr_high = 350;
  4913. rx_pause.cqe_thr_high = 350;
  4914. rx_pause.sge_thr_high = 0;
  4915. for_each_queue(bp, i) {
  4916. struct bnx2x_fastpath *fp = &bp->fp[i];
  4917. if (!fp->disable_tpa) {
  4918. rx_pause.sge_thr_low = 150;
  4919. rx_pause.sge_thr_high = 250;
  4920. }
  4921. offset = BAR_USTRORM_INTMEM +
  4922. USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
  4923. fp->cl_id);
  4924. for (j = 0;
  4925. j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
  4926. j++)
  4927. REG_WR(bp, offset + j*4,
  4928. ((u32 *)&rx_pause)[j]);
  4929. }
  4930. }
  4931. memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
  4932. /* Init rate shaping and fairness contexts */
  4933. if (IS_E1HMF(bp)) {
  4934. int vn;
4935. /* During init there is no active link;
4936. until the link is up, set the link rate to 10Gbps */
  4937. bp->link_vars.line_speed = SPEED_10000;
  4938. bnx2x_init_port_minmax(bp);
  4939. if (!BP_NOMCP(bp))
  4940. bp->mf_config =
  4941. SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
  4942. bnx2x_calc_vn_weight_sum(bp);
  4943. for (vn = VN_0; vn < E1HVN_MAX; vn++)
  4944. bnx2x_init_vn_minmax(bp, 2*vn + port);
  4945. /* Enable rate shaping and fairness */
  4946. bp->cmng.flags.cmng_enables |=
  4947. CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
  4948. } else {
  4949. /* rate shaping and fairness are disabled */
  4950. DP(NETIF_MSG_IFUP,
  4951. "single function mode minmax will be disabled\n");
  4952. }
  4953. /* Store cmng structures to internal memory */
  4954. if (bp->port.pmf)
  4955. for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
  4956. REG_WR(bp, BAR_XSTRORM_INTMEM +
  4957. XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
  4958. ((u32 *)(&bp->cmng))[i]);
  4959. }
  4960. static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
  4961. {
  4962. switch (load_code) {
  4963. case FW_MSG_CODE_DRV_LOAD_COMMON:
  4964. bnx2x_init_internal_common(bp);
  4965. /* no break */
  4966. case FW_MSG_CODE_DRV_LOAD_PORT:
  4967. bnx2x_init_internal_port(bp);
  4968. /* no break */
  4969. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  4970. bnx2x_init_internal_func(bp);
  4971. break;
  4972. default:
  4973. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  4974. break;
  4975. }
  4976. }
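/* Top-level NIC init: set up every fastpath status block, the default SB,
 * coalescing, all rings and contexts, then enable interrupts and check for
 * a pending SPIO5 (fan failure) attention. */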
  4977. static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
  4978. {
  4979. int i;
  4980. for_each_queue(bp, i) {
  4981. struct bnx2x_fastpath *fp = &bp->fp[i];
  4982. fp->bp = bp;
  4983. fp->state = BNX2X_FP_STATE_CLOSED;
  4984. fp->index = i;
  4985. fp->cl_id = BP_L_ID(bp) + i;
  4986. #ifdef BCM_CNIC
  4987. fp->sb_id = fp->cl_id + 1;
  4988. #else
  4989. fp->sb_id = fp->cl_id;
  4990. #endif
  4991. DP(NETIF_MSG_IFUP,
  4992. "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
  4993. i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
  4994. bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
  4995. fp->sb_id);
  4996. bnx2x_update_fpsb_idx(fp);
  4997. }
  4998. /* ensure status block indices were read */
  4999. rmb();
  5000. bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
  5001. DEF_SB_ID);
  5002. bnx2x_update_dsb_idx(bp);
  5003. bnx2x_update_coalesce(bp);
  5004. bnx2x_init_rx_rings(bp);
  5005. bnx2x_init_tx_ring(bp);
  5006. bnx2x_init_sp_ring(bp);
  5007. bnx2x_init_context(bp);
  5008. bnx2x_init_internal(bp, load_code);
  5009. bnx2x_init_ind_table(bp);
  5010. bnx2x_stats_init(bp);
  5011. /* At this point, we are ready for interrupts */
  5012. atomic_set(&bp->intr_sem, 0);
  5013. /* flush all before enabling interrupts */
  5014. mb();
  5015. mmiowb();
  5016. bnx2x_int_enable(bp);
  5017. /* Check for SPIO5 */
  5018. bnx2x_attn_int_deasserted0(bp,
  5019. REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
  5020. AEU_INPUTS_ATTN_BITS_SPIO5);
  5021. }
  5022. /* end of nic init */
  5023. /*
  5024. * gzip service functions
  5025. */
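/* Allocate the DMA-coherent buffer and the zlib stream (plus inflate
 * workspace) used by bnx2x_gunzip() to decompress firmware data. */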
  5026. static int bnx2x_gunzip_init(struct bnx2x *bp)
  5027. {
  5028. bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
  5029. &bp->gunzip_mapping, GFP_KERNEL);
  5030. if (bp->gunzip_buf == NULL)
  5031. goto gunzip_nomem1;
  5032. bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
  5033. if (bp->strm == NULL)
  5034. goto gunzip_nomem2;
  5035. bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
  5036. GFP_KERNEL);
  5037. if (bp->strm->workspace == NULL)
  5038. goto gunzip_nomem3;
  5039. return 0;
  5040. gunzip_nomem3:
  5041. kfree(bp->strm);
  5042. bp->strm = NULL;
  5043. gunzip_nomem2:
  5044. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  5045. bp->gunzip_mapping);
  5046. bp->gunzip_buf = NULL;
  5047. gunzip_nomem1:
5048. netdev_err(bp->dev, "Cannot allocate firmware buffer for"
5049. " decompression\n");
  5050. return -ENOMEM;
  5051. }
  5052. static void bnx2x_gunzip_end(struct bnx2x *bp)
  5053. {
  5054. kfree(bp->strm->workspace);
  5055. kfree(bp->strm);
  5056. bp->strm = NULL;
  5057. if (bp->gunzip_buf) {
  5058. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  5059. bp->gunzip_mapping);
  5060. bp->gunzip_buf = NULL;
  5061. }
  5062. }
  5063. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
  5064. {
  5065. int n, rc;
  5066. /* check gzip header */
  5067. if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
  5068. BNX2X_ERR("Bad gzip header\n");
  5069. return -EINVAL;
  5070. }
  5071. n = 10;
  5072. #define FNAME 0x8
  5073. if (zbuf[3] & FNAME)
  5074. while ((zbuf[n++] != 0) && (n < len));
  5075. bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
  5076. bp->strm->avail_in = len - n;
  5077. bp->strm->next_out = bp->gunzip_buf;
  5078. bp->strm->avail_out = FW_BUF_SIZE;
  5079. rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
  5080. if (rc != Z_OK)
  5081. return rc;
  5082. rc = zlib_inflate(bp->strm, Z_FINISH);
  5083. if ((rc != Z_OK) && (rc != Z_STREAM_END))
  5084. netdev_err(bp->dev, "Firmware decompression error: %s\n",
  5085. bp->strm->msg);
  5086. bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
  5087. if (bp->gunzip_outlen & 0x3)
  5088. netdev_err(bp->dev, "Firmware decompression error:"
  5089. " gunzip_outlen (%d) not aligned\n",
  5090. bp->gunzip_outlen);
  5091. bp->gunzip_outlen >>= 2;
  5092. zlib_inflateEnd(bp->strm);
  5093. if (rc == Z_STREAM_END)
  5094. return 0;
  5095. return rc;
  5096. }
  5097. /* nic load/unload */
  5098. /*
  5099. * General service functions
  5100. */
  5101. /* send a NIG loopback debug packet */
  5102. static void bnx2x_lb_pckt(struct bnx2x *bp)
  5103. {
  5104. u32 wb_write[3];
  5105. /* Ethernet source and destination addresses */
  5106. wb_write[0] = 0x55555555;
  5107. wb_write[1] = 0x55555555;
  5108. wb_write[2] = 0x20; /* SOP */
  5109. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  5110. /* NON-IP protocol */
  5111. wb_write[0] = 0x09000000;
  5112. wb_write[1] = 0x55555555;
  5113. wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
  5114. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  5115. }
5116. /* Some of the internal memories are not directly readable from the
5117. * driver; to test them we send debug (loopback) packets and check
5118. * the packet counters in the NIG and PRS blocks.
5119. */
  5120. static int bnx2x_int_mem_test(struct bnx2x *bp)
  5121. {
  5122. int factor;
  5123. int count, i;
  5124. u32 val = 0;
  5125. if (CHIP_REV_IS_FPGA(bp))
  5126. factor = 120;
  5127. else if (CHIP_REV_IS_EMUL(bp))
  5128. factor = 200;
  5129. else
  5130. factor = 1;
  5131. DP(NETIF_MSG_HW, "start part1\n");
  5132. /* Disable inputs of parser neighbor blocks */
  5133. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  5134. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  5135. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  5136. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  5137. /* Write 0 to parser credits for CFC search request */
  5138. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  5139. /* send Ethernet packet */
  5140. bnx2x_lb_pckt(bp);
5141. /* TODO: should the NIG statistics be reset here? */
  5142. /* Wait until NIG register shows 1 packet of size 0x10 */
  5143. count = 1000 * factor;
  5144. while (count) {
  5145. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  5146. val = *bnx2x_sp(bp, wb_data[0]);
  5147. if (val == 0x10)
  5148. break;
  5149. msleep(10);
  5150. count--;
  5151. }
  5152. if (val != 0x10) {
  5153. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  5154. return -1;
  5155. }
  5156. /* Wait until PRS register shows 1 packet */
  5157. count = 1000 * factor;
  5158. while (count) {
  5159. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  5160. if (val == 1)
  5161. break;
  5162. msleep(10);
  5163. count--;
  5164. }
  5165. if (val != 0x1) {
  5166. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  5167. return -2;
  5168. }
  5169. /* Reset and init BRB, PRS */
  5170. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  5171. msleep(50);
  5172. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  5173. msleep(50);
  5174. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  5175. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  5176. DP(NETIF_MSG_HW, "part2\n");
  5177. /* Disable inputs of parser neighbor blocks */
  5178. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  5179. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  5180. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  5181. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  5182. /* Write 0 to parser credits for CFC search request */
  5183. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  5184. /* send 10 Ethernet packets */
  5185. for (i = 0; i < 10; i++)
  5186. bnx2x_lb_pckt(bp);
  5187. /* Wait until NIG register shows 10 + 1
  5188. packets of size 11*0x10 = 0xb0 */
  5189. count = 1000 * factor;
  5190. while (count) {
  5191. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  5192. val = *bnx2x_sp(bp, wb_data[0]);
  5193. if (val == 0xb0)
  5194. break;
  5195. msleep(10);
  5196. count--;
  5197. }
  5198. if (val != 0xb0) {
  5199. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  5200. return -3;
  5201. }
  5202. /* Wait until PRS register shows 2 packets */
  5203. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  5204. if (val != 2)
  5205. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  5206. /* Write 1 to parser credits for CFC search request */
  5207. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
  5208. /* Wait until PRS register shows 3 packets */
  5209. msleep(10 * factor);
5210. /* The PRS packet counter should now show 3 packets */
  5211. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  5212. if (val != 3)
  5213. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  5214. /* clear NIG EOP FIFO */
  5215. for (i = 0; i < 11; i++)
  5216. REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
  5217. val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
  5218. if (val != 1) {
  5219. BNX2X_ERR("clear of NIG failed\n");
  5220. return -4;
  5221. }
  5222. /* Reset and init BRB, PRS, NIG */
  5223. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  5224. msleep(50);
  5225. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  5226. msleep(50);
  5227. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  5228. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  5229. #ifndef BCM_CNIC
  5230. /* set NIC mode */
  5231. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  5232. #endif
  5233. /* Enable inputs of parser neighbor blocks */
  5234. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
  5235. REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
  5236. REG_WR(bp, CFC_REG_DEBUG0, 0x0);
  5237. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
  5238. DP(NETIF_MSG_HW, "done\n");
  5239. return 0; /* OK */
  5240. }
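/* Unmask the interrupt sources of most HW blocks so they can raise
 * attentions; the commented-out writes are masks intentionally left at
 * their defaults, and PBF keeps bits 3 and 4 masked. */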
  5241. static void enable_blocks_attention(struct bnx2x *bp)
  5242. {
  5243. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  5244. REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
  5245. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  5246. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  5247. REG_WR(bp, QM_REG_QM_INT_MASK, 0);
  5248. REG_WR(bp, TM_REG_TM_INT_MASK, 0);
  5249. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
  5250. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
  5251. REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
  5252. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
  5253. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
  5254. REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
  5255. REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
  5256. REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
  5257. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
  5258. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
  5259. REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
  5260. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
  5261. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
  5262. REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
  5263. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
  5264. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
  5265. if (CHIP_REV_IS_FPGA(bp))
  5266. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
  5267. else
  5268. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
  5269. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
  5270. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
  5271. REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
  5272. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
  5273. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
  5274. REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
  5275. REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
  5276. /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5277. REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
  5278. }
  5279. static const struct {
  5280. u32 addr;
  5281. u32 mask;
  5282. } bnx2x_parity_mask[] = {
  5283. {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
  5284. {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
  5285. {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
  5286. {HC_REG_HC_PRTY_MASK, 0xffffffff},
  5287. {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
  5288. {QM_REG_QM_PRTY_MASK, 0x0},
  5289. {DORQ_REG_DORQ_PRTY_MASK, 0x0},
  5290. {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
  5291. {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
  5292. {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
  5293. {CDU_REG_CDU_PRTY_MASK, 0x0},
  5294. {CFC_REG_CFC_PRTY_MASK, 0x0},
  5295. {DBG_REG_DBG_PRTY_MASK, 0x0},
  5296. {DMAE_REG_DMAE_PRTY_MASK, 0x0},
  5297. {BRB1_REG_BRB1_PRTY_MASK, 0x0},
  5298. {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
  5299. {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
  5300. {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
  5301. {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
  5302. {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
  5303. {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
  5304. {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
  5305. {USEM_REG_USEM_PRTY_MASK_0, 0x0},
  5306. {USEM_REG_USEM_PRTY_MASK_1, 0x0},
  5307. {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
  5308. {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
  5309. {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
  5310. {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
  5311. };
  5312. static void enable_blocks_parity(struct bnx2x *bp)
  5313. {
  5314. int i, mask_arr_len =
  5315. sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
  5316. for (i = 0; i < mask_arr_len; i++)
  5317. REG_WR(bp, bnx2x_parity_mask[i].addr,
  5318. bnx2x_parity_mask[i].mask);
  5319. }
  5320. static void bnx2x_reset_common(struct bnx2x *bp)
  5321. {
  5322. /* reset_common */
  5323. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  5324. 0xd3ffff7f);
  5325. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
  5326. }
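/* Derive the PXP read/write request ordering from the PCIe Device Control
 * register: the max payload size field sets the write order and the max
 * read request size sets the read order, unless the mrrs module parameter
 * overrides the latter. */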
  5327. static void bnx2x_init_pxp(struct bnx2x *bp)
  5328. {
  5329. u16 devctl;
  5330. int r_order, w_order;
  5331. pci_read_config_word(bp->pdev,
  5332. bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
  5333. DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
  5334. w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
  5335. if (bp->mrrs == -1)
  5336. r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
  5337. else {
  5338. DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
  5339. r_order = bp->mrrs;
  5340. }
  5341. bnx2x_init_pxp_arb(bp, r_order, w_order);
  5342. }
  5343. static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
  5344. {
  5345. int is_required;
  5346. u32 val;
  5347. int port;
  5348. if (BP_NOMCP(bp))
  5349. return;
  5350. is_required = 0;
  5351. val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
  5352. SHARED_HW_CFG_FAN_FAILURE_MASK;
  5353. if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
  5354. is_required = 1;
  5355. /*
  5356. * The fan failure mechanism is usually related to the PHY type since
  5357. * the power consumption of the board is affected by the PHY. Currently,
5358. * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
  5359. */
  5360. else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
  5361. for (port = PORT_0; port < PORT_MAX; port++) {
  5362. u32 phy_type =
  5363. SHMEM_RD(bp, dev_info.port_hw_config[port].
  5364. external_phy_config) &
  5365. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
  5366. is_required |=
  5367. ((phy_type ==
  5368. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
  5369. (phy_type ==
  5370. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
  5371. (phy_type ==
  5372. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
  5373. }
  5374. DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
  5375. if (is_required == 0)
  5376. return;
  5377. /* Fan failure is indicated by SPIO 5 */
  5378. bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
  5379. MISC_REGISTERS_SPIO_INPUT_HI_Z);
  5380. /* set to active low mode */
  5381. val = REG_RD(bp, MISC_REG_SPIO_INT);
  5382. val |= ((1 << MISC_REGISTERS_SPIO_5) <<
  5383. MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
  5384. REG_WR(bp, MISC_REG_SPIO_INT, val);
  5385. /* enable interrupt to signal the IGU */
  5386. val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
  5387. val |= (1 << MISC_REGISTERS_SPIO_5);
  5388. REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
  5389. }
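/* Chip-wide (common) initialization, run by the function that received the
 * COMMON load response from the MCP: global block resets, PXP/DMAE
 * bring-up, storm memories, CDU/CFC setup and the internal memory
 * self test on E1. */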
  5390. static int bnx2x_init_common(struct bnx2x *bp)
  5391. {
  5392. u32 val, i;
  5393. #ifdef BCM_CNIC
  5394. u32 wb_write[2];
  5395. #endif
  5396. DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
  5397. bnx2x_reset_common(bp);
  5398. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
  5399. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
  5400. bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
  5401. if (CHIP_IS_E1H(bp))
  5402. REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
  5403. REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
  5404. msleep(30);
  5405. REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
  5406. bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
  5407. if (CHIP_IS_E1(bp)) {
  5408. /* enable HW interrupt from PXP on USDM overflow
  5409. bit 16 on INT_MASK_0 */
  5410. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  5411. }
  5412. bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
  5413. bnx2x_init_pxp(bp);
  5414. #ifdef __BIG_ENDIAN
  5415. REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
  5416. REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
  5417. REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
  5418. REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
  5419. REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
  5420. /* make sure this value is 0 */
  5421. REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
  5422. /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
  5423. REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
  5424. REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
  5425. REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
  5426. REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
  5427. #endif
  5428. REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
  5429. #ifdef BCM_CNIC
  5430. REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
  5431. REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
  5432. REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
  5433. #endif
  5434. if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
  5435. REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5436. /* let the HW do its magic ... */
  5437. msleep(100);
  5438. /* finish PXP init */
  5439. val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
  5440. if (val != 1) {
  5441. BNX2X_ERR("PXP2 CFG failed\n");
  5442. return -EBUSY;
  5443. }
  5444. val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
  5445. if (val != 1) {
  5446. BNX2X_ERR("PXP2 RD_INIT failed\n");
  5447. return -EBUSY;
  5448. }
  5449. REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
  5450. REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
  5451. bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
  5452. /* clean the DMAE memory */
  5453. bp->dmae_ready = 1;
  5454. bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
  5455. bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
  5456. bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
  5457. bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
  5458. bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
  5459. bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
  5460. bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
  5461. bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
  5462. bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
  5463. bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
  5464. #ifdef BCM_CNIC
  5465. wb_write[0] = 0;
  5466. wb_write[1] = 0;
  5467. for (i = 0; i < 64; i++) {
  5468. REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
  5469. bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
  5470. if (CHIP_IS_E1H(bp)) {
  5471. REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
  5472. bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
  5473. wb_write, 2);
  5474. }
  5475. }
  5476. #endif
  5477. /* soft reset pulse */
  5478. REG_WR(bp, QM_REG_SOFT_RESET, 1);
  5479. REG_WR(bp, QM_REG_SOFT_RESET, 0);
  5480. #ifdef BCM_CNIC
  5481. bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
  5482. #endif
  5483. bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
  5484. REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
  5485. if (!CHIP_REV_IS_SLOW(bp)) {
  5486. /* enable hw interrupt from doorbell Q */
  5487. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  5488. }
  5489. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  5490. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  5491. REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
  5492. #ifndef BCM_CNIC
  5493. /* set NIC mode */
  5494. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  5495. #endif
  5496. if (CHIP_IS_E1H(bp))
  5497. REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
  5498. bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
  5499. bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
  5500. bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
  5501. bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
  5502. bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  5503. bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  5504. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  5505. bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  5506. bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
  5507. bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
  5508. bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
  5509. bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
  5510. /* sync semi rtc */
  5511. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  5512. 0x80000000);
  5513. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
  5514. 0x80000000);
  5515. bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
  5516. bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
  5517. bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
  5518. REG_WR(bp, SRC_REG_SOFT_RST, 1);
  5519. for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
  5520. REG_WR(bp, i, random32());
  5521. bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
  5522. #ifdef BCM_CNIC
  5523. REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
  5524. REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
  5525. REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
  5526. REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
  5527. REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
  5528. REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
  5529. REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
  5530. REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
  5531. REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
  5532. REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
  5533. #endif
  5534. REG_WR(bp, SRC_REG_SOFT_RST, 0);
  5535. if (sizeof(union cdu_context) != 1024)
  5536. /* we currently assume that a context is 1024 bytes */
  5537. dev_alert(&bp->pdev->dev, "please adjust the size "
  5538. "of cdu_context(%ld)\n",
  5539. (long)sizeof(union cdu_context));
  5540. bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
  5541. val = (4 << 24) + (0 << 12) + 1024;
  5542. REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
  5543. bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
  5544. REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
  5545. /* enable context validation interrupt from CFC */
  5546. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  5547. /* set the thresholds to prevent CFC/CDU race */
  5548. REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
  5549. bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
  5550. bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
  5551. bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
  5552. /* Reset PCIE errors for debug */
  5553. REG_WR(bp, 0x2814, 0xffffffff);
  5554. REG_WR(bp, 0x3820, 0xffffffff);
  5555. bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
  5556. bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
  5557. bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
  5558. bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
  5559. bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
  5560. if (CHIP_IS_E1H(bp)) {
  5561. REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
  5562. REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
  5563. }
  5564. if (CHIP_REV_IS_SLOW(bp))
  5565. msleep(200);
  5566. /* finish CFC init */
  5567. val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
  5568. if (val != 1) {
  5569. BNX2X_ERR("CFC LL_INIT failed\n");
  5570. return -EBUSY;
  5571. }
  5572. val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
  5573. if (val != 1) {
  5574. BNX2X_ERR("CFC AC_INIT failed\n");
  5575. return -EBUSY;
  5576. }
  5577. val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
  5578. if (val != 1) {
  5579. BNX2X_ERR("CFC CAM_INIT failed\n");
  5580. return -EBUSY;
  5581. }
  5582. REG_WR(bp, CFC_REG_DEBUG0, 0);
5583. /* read the NIG statistics
5584. to see if this is our first bring-up since power-up */
  5585. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  5586. val = *bnx2x_sp(bp, wb_data[0]);
  5587. /* do internal memory self test */
  5588. if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
  5589. BNX2X_ERR("internal mem self test failed\n");
  5590. return -EBUSY;
  5591. }
  5592. switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
  5593. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
  5594. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
  5595. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
  5596. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
  5597. bp->port.need_hw_lock = 1;
  5598. break;
  5599. default:
  5600. break;
  5601. }
  5602. bnx2x_setup_fan_failure_detection(bp);
  5603. /* clear PXP2 attentions */
  5604. REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
  5605. enable_blocks_attention(bp);
  5606. if (CHIP_PARITY_SUPPORTED(bp))
  5607. enable_blocks_parity(bp);
  5608. if (!BP_NOMCP(bp)) {
  5609. bnx2x_acquire_phy_lock(bp);
  5610. bnx2x_common_init_phy(bp, bp->common.shmem_base);
  5611. bnx2x_release_phy_lock(bp);
  5612. } else
  5613. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  5614. return 0;
  5615. }
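/* Per-port initialization: BRB pause thresholds, PBF credits, HC and AEU
 * attention masks, and external-PHY specific attention wiring. */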
  5616. static int bnx2x_init_port(struct bnx2x *bp)
  5617. {
  5618. int port = BP_PORT(bp);
  5619. int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
  5620. u32 low, high;
  5621. u32 val;
  5622. DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
  5623. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  5624. bnx2x_init_block(bp, PXP_BLOCK, init_stage);
  5625. bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
  5626. bnx2x_init_block(bp, TCM_BLOCK, init_stage);
  5627. bnx2x_init_block(bp, UCM_BLOCK, init_stage);
  5628. bnx2x_init_block(bp, CCM_BLOCK, init_stage);
  5629. bnx2x_init_block(bp, XCM_BLOCK, init_stage);
  5630. #ifdef BCM_CNIC
  5631. REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
  5632. bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
  5633. REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
  5634. REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
  5635. #endif
  5636. bnx2x_init_block(bp, DQ_BLOCK, init_stage);
  5637. bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
  5638. if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
  5639. /* no pause for emulation and FPGA */
  5640. low = 0;
  5641. high = 513;
  5642. } else {
  5643. if (IS_E1HMF(bp))
  5644. low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
  5645. else if (bp->dev->mtu > 4096) {
  5646. if (bp->flags & ONE_PORT_FLAG)
  5647. low = 160;
  5648. else {
  5649. val = bp->dev->mtu;
5650. /* low threshold in 256-byte BRB blocks: (24*1024 + val*4)/256 = 96 + val/64, rounded up */
  5651. low = 96 + (val/64) + ((val % 64) ? 1 : 0);
  5652. }
  5653. } else
  5654. low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
  5655. high = low + 56; /* 14*1024/256 */
  5656. }
  5657. REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
  5658. REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
  5659. bnx2x_init_block(bp, PRS_BLOCK, init_stage);
  5660. bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
  5661. bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
  5662. bnx2x_init_block(bp, USDM_BLOCK, init_stage);
  5663. bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
  5664. bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
  5665. bnx2x_init_block(bp, USEM_BLOCK, init_stage);
  5666. bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
  5667. bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
  5668. bnx2x_init_block(bp, UPB_BLOCK, init_stage);
  5669. bnx2x_init_block(bp, XPB_BLOCK, init_stage);
  5670. bnx2x_init_block(bp, PBF_BLOCK, init_stage);
  5671. /* configure PBF to work without PAUSE mtu 9000 */
  5672. REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
  5673. /* update threshold */
  5674. REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
  5675. /* update init credit */
  5676. REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
  5677. /* probe changes */
  5678. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
  5679. msleep(5);
  5680. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
  5681. #ifdef BCM_CNIC
  5682. bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
  5683. #endif
  5684. bnx2x_init_block(bp, CDU_BLOCK, init_stage);
  5685. bnx2x_init_block(bp, CFC_BLOCK, init_stage);
  5686. if (CHIP_IS_E1(bp)) {
  5687. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  5688. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  5689. }
  5690. bnx2x_init_block(bp, HC_BLOCK, init_stage);
  5691. bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
  5692. /* init aeu_mask_attn_func_0/1:
  5693. * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
  5694. * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
  5695. * bits 4-7 are used for "per vn group attention" */
  5696. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
  5697. (IS_E1HMF(bp) ? 0xF7 : 0x7));
  5698. bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
  5699. bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
  5700. bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
  5701. bnx2x_init_block(bp, DBU_BLOCK, init_stage);
  5702. bnx2x_init_block(bp, DBG_BLOCK, init_stage);
  5703. bnx2x_init_block(bp, NIG_BLOCK, init_stage);
  5704. REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
  5705. if (CHIP_IS_E1H(bp)) {
  5706. /* 0x2 disable e1hov, 0x1 enable */
  5707. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
  5708. (IS_E1HMF(bp) ? 0x1 : 0x2));
  5709. {
  5710. REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
  5711. REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
  5712. REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
  5713. }
  5714. }
  5715. bnx2x_init_block(bp, MCP_BLOCK, init_stage);
  5716. bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
  5717. switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
  5718. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
  5719. {
  5720. u32 swap_val, swap_override, aeu_gpio_mask, offset;
  5721. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
  5722. MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
  5723. /* The GPIO should be swapped if the swap register is
  5724. set and active */
  5725. swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
  5726. swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
  5727. /* Select function upon port-swap configuration */
  5728. if (port == 0) {
  5729. offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
  5730. aeu_gpio_mask = (swap_val && swap_override) ?
  5731. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
  5732. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
  5733. } else {
  5734. offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
  5735. aeu_gpio_mask = (swap_val && swap_override) ?
  5736. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
  5737. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
  5738. }
  5739. val = REG_RD(bp, offset);
  5740. /* add GPIO3 to group */
  5741. val |= aeu_gpio_mask;
  5742. REG_WR(bp, offset, val);
  5743. }
  5744. break;
  5745. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
  5746. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
  5747. /* add SPIO 5 to group 0 */
  5748. {
  5749. u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  5750. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  5751. val = REG_RD(bp, reg_addr);
  5752. val |= AEU_INPUTS_ATTN_BITS_SPIO5;
  5753. REG_WR(bp, reg_addr, val);
  5754. }
  5755. break;
  5756. default:
  5757. break;
  5758. }
  5759. bnx2x__link_reset(bp);
  5760. return 0;
  5761. }
  5762. #define ILT_PER_FUNC (768/2)
  5763. #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5764. /* the phys address is shifted right 12 bits and has a
5765. 1=valid bit added at the 53rd bit
  5766. then since this is a wide register(TM)
  5767. we split it into two 32 bit writes
  5768. */
  5769. #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
  5770. #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
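/* Illustrative example (hypothetical address): for
 * addr = 0x0000001234567000ULL
 *   ONCHIP_ADDR1(addr) = 0x01234567  (address bits 43..12)
 *   ONCHIP_ADDR2(addr) = 0x00100000  (valid bit 1<<20; bits 63..44 are 0)
 */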
  5771. #define PXP_ONE_ILT(x) (((x) << 10) | x)
  5772. #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
  5773. #ifdef BCM_CNIC
  5774. #define CNIC_ILT_LINES 127
  5775. #define CNIC_CTX_PER_ILT 16
  5776. #else
  5777. #define CNIC_ILT_LINES 0
  5778. #endif
  5779. static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
  5780. {
  5781. int reg;
  5782. if (CHIP_IS_E1H(bp))
  5783. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
  5784. else /* E1 */
  5785. reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
  5786. bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
  5787. }
  5788. static int bnx2x_init_func(struct bnx2x *bp)
  5789. {
  5790. int port = BP_PORT(bp);
  5791. int func = BP_FUNC(bp);
  5792. u32 addr, val;
  5793. int i;
  5794. DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
  5795. /* set MSI reconfigure capability */
  5796. addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
  5797. val = REG_RD(bp, addr);
  5798. val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
  5799. REG_WR(bp, addr, val);
  5800. i = FUNC_ILT_BASE(func);
  5801. bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
  5802. if (CHIP_IS_E1H(bp)) {
  5803. REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
  5804. REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
  5805. } else /* E1 */
  5806. REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
  5807. PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
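/* With BCM_CNIC the block below programs additional ILT lines for the
 * offload path: one line each for the timers block, the QM queues and
 * the searcher T1 table, plus the searcher T2 free-list registers.
 * Without CNIC (CNIC_ILT_LINES == 0) only the CDU context line above
 * is used.
 */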
  5808. #ifdef BCM_CNIC
  5809. i += 1 + CNIC_ILT_LINES;
  5810. bnx2x_ilt_wr(bp, i, bp->timers_mapping);
  5811. if (CHIP_IS_E1(bp))
  5812. REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
  5813. else {
  5814. REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
  5815. REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
  5816. }
  5817. i++;
  5818. bnx2x_ilt_wr(bp, i, bp->qm_mapping);
  5819. if (CHIP_IS_E1(bp))
  5820. REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
  5821. else {
  5822. REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
  5823. REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
  5824. }
  5825. i++;
  5826. bnx2x_ilt_wr(bp, i, bp->t1_mapping);
  5827. if (CHIP_IS_E1(bp))
  5828. REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
  5829. else {
  5830. REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
  5831. REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
  5832. }
  5833. /* tell the searcher where the T2 table is */
  5834. REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
  5835. bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
  5836. U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
  5837. bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
  5838. U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
  5839. U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
  5840. REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
  5841. #endif
  5842. if (CHIP_IS_E1H(bp)) {
  5843. bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
  5844. bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
  5845. bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
  5846. bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
  5847. bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
  5848. bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
  5849. bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
  5850. bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
  5851. bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
  5852. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
  5853. REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
  5854. }
  5855. /* HC init per function */
  5856. if (CHIP_IS_E1H(bp)) {
  5857. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  5858. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  5859. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  5860. }
  5861. bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
  5862. /* Reset PCIE errors for debug */
  5863. REG_WR(bp, 0x2114, 0xffffffff);
  5864. REG_WR(bp, 0x2120, 0xffffffff);
  5865. return 0;
  5866. }
  5867. static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
  5868. {
  5869. int i, rc = 0;
  5870. DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
  5871. BP_FUNC(bp), load_code);
  5872. bp->dmae_ready = 0;
  5873. mutex_init(&bp->dmae_mutex);
  5874. rc = bnx2x_gunzip_init(bp);
  5875. if (rc)
  5876. return rc;
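/* The switch below falls through on purpose: a COMMON load also runs
 * the PORT and FUNCTION stages, and a PORT load also runs the
 * FUNCTION stage (see the "no break" markers).
 */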
  5877. switch (load_code) {
  5878. case FW_MSG_CODE_DRV_LOAD_COMMON:
  5879. rc = bnx2x_init_common(bp);
  5880. if (rc)
  5881. goto init_hw_err;
  5882. /* no break */
  5883. case FW_MSG_CODE_DRV_LOAD_PORT:
  5884. bp->dmae_ready = 1;
  5885. rc = bnx2x_init_port(bp);
  5886. if (rc)
  5887. goto init_hw_err;
  5888. /* no break */
  5889. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  5890. bp->dmae_ready = 1;
  5891. rc = bnx2x_init_func(bp);
  5892. if (rc)
  5893. goto init_hw_err;
  5894. break;
  5895. default:
  5896. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  5897. break;
  5898. }
  5899. if (!BP_NOMCP(bp)) {
  5900. int func = BP_FUNC(bp);
  5901. bp->fw_drv_pulse_wr_seq =
  5902. (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
  5903. DRV_PULSE_SEQ_MASK);
  5904. DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
  5905. }
  5906. /* this needs to be done before gunzip end */
  5907. bnx2x_zero_def_sb(bp);
  5908. for_each_queue(bp, i)
  5909. bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
  5910. #ifdef BCM_CNIC
  5911. bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
  5912. #endif
  5913. init_hw_err:
  5914. bnx2x_gunzip_end(bp);
  5915. return rc;
  5916. }
  5917. static void bnx2x_free_mem(struct bnx2x *bp)
  5918. {
  5919. #define BNX2X_PCI_FREE(x, y, size) \
  5920. do { \
  5921. if (x) { \
  5922. dma_free_coherent(&bp->pdev->dev, size, x, y); \
  5923. x = NULL; \
  5924. y = 0; \
  5925. } \
  5926. } while (0)
  5927. #define BNX2X_FREE(x) \
  5928. do { \
  5929. if (x) { \
  5930. vfree(x); \
  5931. x = NULL; \
  5932. } \
  5933. } while (0)
  5934. int i;
  5935. /* fastpath */
  5936. /* Common */
  5937. for_each_queue(bp, i) {
  5938. /* status blocks */
  5939. BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
  5940. bnx2x_fp(bp, i, status_blk_mapping),
  5941. sizeof(struct host_status_block));
  5942. }
  5943. /* Rx */
  5944. for_each_queue(bp, i) {
  5945. /* fastpath rx rings: rx_buf rx_desc rx_comp */
  5946. BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
  5947. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
  5948. bnx2x_fp(bp, i, rx_desc_mapping),
  5949. sizeof(struct eth_rx_bd) * NUM_RX_BD);
  5950. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
  5951. bnx2x_fp(bp, i, rx_comp_mapping),
  5952. sizeof(struct eth_fast_path_rx_cqe) *
  5953. NUM_RCQ_BD);
  5954. /* SGE ring */
  5955. BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
  5956. BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
  5957. bnx2x_fp(bp, i, rx_sge_mapping),
  5958. BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
  5959. }
  5960. /* Tx */
  5961. for_each_queue(bp, i) {
  5962. /* fastpath tx rings: tx_buf tx_desc */
  5963. BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
  5964. BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
  5965. bnx2x_fp(bp, i, tx_desc_mapping),
  5966. sizeof(union eth_tx_bd_types) * NUM_TX_BD);
  5967. }
  5968. /* end of fastpath */
  5969. BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
  5970. sizeof(struct host_def_status_block));
  5971. BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
  5972. sizeof(struct bnx2x_slowpath));
  5973. #ifdef BCM_CNIC
  5974. BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
  5975. BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
  5976. BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
  5977. BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
  5978. BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
  5979. sizeof(struct host_status_block));
  5980. #endif
  5981. BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
  5982. #undef BNX2X_PCI_FREE
5983. #undef BNX2X_FREE
  5984. }
  5985. static int bnx2x_alloc_mem(struct bnx2x *bp)
  5986. {
  5987. #define BNX2X_PCI_ALLOC(x, y, size) \
  5988. do { \
  5989. x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
  5990. if (x == NULL) \
  5991. goto alloc_mem_err; \
  5992. memset(x, 0, size); \
  5993. } while (0)
  5994. #define BNX2X_ALLOC(x, size) \
  5995. do { \
  5996. x = vmalloc(size); \
  5997. if (x == NULL) \
  5998. goto alloc_mem_err; \
  5999. memset(x, 0, size); \
  6000. } while (0)
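/* Memory the chip accesses by DMA (status blocks, descriptor rings,
 * slowpath structures) is allocated with BNX2X_PCI_ALLOC
 * (dma_alloc_coherent); host-only shadow arrays such as the sw_rx_bd
 * and sw_tx_bd rings use BNX2X_ALLOC (vmalloc).
 */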
  6001. int i;
  6002. /* fastpath */
  6003. /* Common */
  6004. for_each_queue(bp, i) {
  6005. bnx2x_fp(bp, i, bp) = bp;
  6006. /* status blocks */
  6007. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
  6008. &bnx2x_fp(bp, i, status_blk_mapping),
  6009. sizeof(struct host_status_block));
  6010. }
  6011. /* Rx */
  6012. for_each_queue(bp, i) {
  6013. /* fastpath rx rings: rx_buf rx_desc rx_comp */
  6014. BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
  6015. sizeof(struct sw_rx_bd) * NUM_RX_BD);
  6016. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
  6017. &bnx2x_fp(bp, i, rx_desc_mapping),
  6018. sizeof(struct eth_rx_bd) * NUM_RX_BD);
  6019. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
  6020. &bnx2x_fp(bp, i, rx_comp_mapping),
  6021. sizeof(struct eth_fast_path_rx_cqe) *
  6022. NUM_RCQ_BD);
  6023. /* SGE ring */
  6024. BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
  6025. sizeof(struct sw_rx_page) * NUM_RX_SGE);
  6026. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
  6027. &bnx2x_fp(bp, i, rx_sge_mapping),
  6028. BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
  6029. }
  6030. /* Tx */
  6031. for_each_queue(bp, i) {
  6032. /* fastpath tx rings: tx_buf tx_desc */
  6033. BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
  6034. sizeof(struct sw_tx_bd) * NUM_TX_BD);
  6035. BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
  6036. &bnx2x_fp(bp, i, tx_desc_mapping),
  6037. sizeof(union eth_tx_bd_types) * NUM_TX_BD);
  6038. }
  6039. /* end of fastpath */
  6040. BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
  6041. sizeof(struct host_def_status_block));
  6042. BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
  6043. sizeof(struct bnx2x_slowpath));
  6044. #ifdef BCM_CNIC
  6045. BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
  6046. /* allocate searcher T2 table
  6047. we allocate 1/4 of alloc num for T2
  6048. (which is not entered into the ILT) */
  6049. BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
  6050. /* Initialize T2 (for 1024 connections) */
  6051. for (i = 0; i < 16*1024; i += 64)
  6052. *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
  6053. /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
  6054. BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
  6055. /* QM queues (128*MAX_CONN) */
  6056. BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
  6057. BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
  6058. sizeof(struct host_status_block));
  6059. #endif
  6060. /* Slow path ring */
  6061. BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
  6062. return 0;
  6063. alloc_mem_err:
  6064. bnx2x_free_mem(bp);
  6065. return -ENOMEM;
  6066. #undef BNX2X_PCI_ALLOC
  6067. #undef BNX2X_ALLOC
  6068. }
  6069. static void bnx2x_free_tx_skbs(struct bnx2x *bp)
  6070. {
  6071. int i;
  6072. for_each_queue(bp, i) {
  6073. struct bnx2x_fastpath *fp = &bp->fp[i];
  6074. u16 bd_cons = fp->tx_bd_cons;
  6075. u16 sw_prod = fp->tx_pkt_prod;
  6076. u16 sw_cons = fp->tx_pkt_cons;
  6077. while (sw_cons != sw_prod) {
  6078. bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
  6079. sw_cons++;
  6080. }
  6081. }
  6082. }
  6083. static void bnx2x_free_rx_skbs(struct bnx2x *bp)
  6084. {
  6085. int i, j;
  6086. for_each_queue(bp, j) {
  6087. struct bnx2x_fastpath *fp = &bp->fp[j];
  6088. for (i = 0; i < NUM_RX_BD; i++) {
  6089. struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
  6090. struct sk_buff *skb = rx_buf->skb;
  6091. if (skb == NULL)
  6092. continue;
  6093. dma_unmap_single(&bp->pdev->dev,
  6094. dma_unmap_addr(rx_buf, mapping),
  6095. bp->rx_buf_size, DMA_FROM_DEVICE);
  6096. rx_buf->skb = NULL;
  6097. dev_kfree_skb(skb);
  6098. }
  6099. if (!fp->disable_tpa)
  6100. bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
  6101. ETH_MAX_AGGREGATION_QUEUES_E1 :
  6102. ETH_MAX_AGGREGATION_QUEUES_E1H);
  6103. }
  6104. }
  6105. static void bnx2x_free_skbs(struct bnx2x *bp)
  6106. {
  6107. bnx2x_free_tx_skbs(bp);
  6108. bnx2x_free_rx_skbs(bp);
  6109. }
  6110. static void bnx2x_free_msix_irqs(struct bnx2x *bp)
  6111. {
  6112. int i, offset = 1;
  6113. free_irq(bp->msix_table[0].vector, bp->dev);
  6114. DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
  6115. bp->msix_table[0].vector);
  6116. #ifdef BCM_CNIC
  6117. offset++;
  6118. #endif
  6119. for_each_queue(bp, i) {
  6120. DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
  6121. "state %x\n", i, bp->msix_table[i + offset].vector,
  6122. bnx2x_fp(bp, i, state));
  6123. free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
  6124. }
  6125. }
  6126. static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
  6127. {
  6128. if (bp->flags & USING_MSIX_FLAG) {
  6129. if (!disable_only)
  6130. bnx2x_free_msix_irqs(bp);
  6131. pci_disable_msix(bp->pdev);
  6132. bp->flags &= ~USING_MSIX_FLAG;
  6133. } else if (bp->flags & USING_MSI_FLAG) {
  6134. if (!disable_only)
  6135. free_irq(bp->pdev->irq, bp->dev);
  6136. pci_disable_msi(bp->pdev);
  6137. bp->flags &= ~USING_MSI_FLAG;
  6138. } else if (!disable_only)
  6139. free_irq(bp->pdev->irq, bp->dev);
  6140. }
  6141. static int bnx2x_enable_msix(struct bnx2x *bp)
  6142. {
  6143. int i, rc, offset = 1;
  6144. int igu_vec = 0;
  6145. bp->msix_table[0].entry = igu_vec;
  6146. DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
  6147. #ifdef BCM_CNIC
  6148. igu_vec = BP_L_ID(bp) + offset;
  6149. bp->msix_table[1].entry = igu_vec;
  6150. DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
  6151. offset++;
  6152. #endif
  6153. for_each_queue(bp, i) {
  6154. igu_vec = BP_L_ID(bp) + offset + i;
  6155. bp->msix_table[i + offset].entry = igu_vec;
  6156. DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
  6157. "(fastpath #%u)\n", i + offset, igu_vec, i);
  6158. }
  6159. rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
  6160. BNX2X_NUM_QUEUES(bp) + offset);
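/* With the legacy pci_enable_msix() API used here, a positive return
 * value is the number of vectors that could have been allocated, so
 * the code below retries with that smaller count before giving up.
 */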
  6161. /*
  6162. * reconfigure number of tx/rx queues according to available
  6163. * MSI-X vectors
  6164. */
  6165. if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
  6166. /* vectors available for FP */
  6167. int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
  6168. DP(NETIF_MSG_IFUP,
  6169. "Trying to use less MSI-X vectors: %d\n", rc);
  6170. rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
  6171. if (rc) {
  6172. DP(NETIF_MSG_IFUP,
  6173. "MSI-X is not attainable rc %d\n", rc);
  6174. return rc;
  6175. }
  6176. bp->num_queues = min(bp->num_queues, fp_vec);
  6177. DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
  6178. bp->num_queues);
  6179. } else if (rc) {
  6180. DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
  6181. return rc;
  6182. }
  6183. bp->flags |= USING_MSIX_FLAG;
  6184. return 0;
  6185. }
  6186. static int bnx2x_req_msix_irqs(struct bnx2x *bp)
  6187. {
  6188. int i, rc, offset = 1;
  6189. rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
  6190. bp->dev->name, bp->dev);
  6191. if (rc) {
  6192. BNX2X_ERR("request sp irq failed\n");
  6193. return -EBUSY;
  6194. }
  6195. #ifdef BCM_CNIC
  6196. offset++;
  6197. #endif
  6198. for_each_queue(bp, i) {
  6199. struct bnx2x_fastpath *fp = &bp->fp[i];
  6200. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  6201. bp->dev->name, i);
  6202. rc = request_irq(bp->msix_table[i + offset].vector,
  6203. bnx2x_msix_fp_int, 0, fp->name, fp);
  6204. if (rc) {
  6205. BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
  6206. bnx2x_free_msix_irqs(bp);
  6207. return -EBUSY;
  6208. }
  6209. fp->state = BNX2X_FP_STATE_IRQ;
  6210. }
  6211. i = BNX2X_NUM_QUEUES(bp);
  6212. netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
  6213. " ... fp[%d] %d\n",
  6214. bp->msix_table[0].vector,
  6215. 0, bp->msix_table[offset].vector,
  6216. i - 1, bp->msix_table[offset + i - 1].vector);
  6217. return 0;
  6218. }
  6219. static int bnx2x_enable_msi(struct bnx2x *bp)
  6220. {
  6221. int rc;
  6222. rc = pci_enable_msi(bp->pdev);
  6223. if (rc) {
  6224. DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
  6225. return -1;
  6226. }
  6227. bp->flags |= USING_MSI_FLAG;
  6228. return 0;
  6229. }
  6230. static int bnx2x_req_irq(struct bnx2x *bp)
  6231. {
  6232. unsigned long flags;
  6233. int rc;
  6234. if (bp->flags & USING_MSI_FLAG)
  6235. flags = 0;
  6236. else
  6237. flags = IRQF_SHARED;
  6238. rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
  6239. bp->dev->name, bp->dev);
  6240. if (!rc)
  6241. bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
  6242. return rc;
  6243. }
  6244. static void bnx2x_napi_enable(struct bnx2x *bp)
  6245. {
  6246. int i;
  6247. for_each_queue(bp, i)
  6248. napi_enable(&bnx2x_fp(bp, i, napi));
  6249. }
  6250. static void bnx2x_napi_disable(struct bnx2x *bp)
  6251. {
  6252. int i;
  6253. for_each_queue(bp, i)
  6254. napi_disable(&bnx2x_fp(bp, i, napi));
  6255. }
  6256. static void bnx2x_netif_start(struct bnx2x *bp)
  6257. {
  6258. int intr_sem;
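/* atomic_dec_and_test() returns true only when intr_sem drops to zero,
 * i.e. when this call balances the last outstanding interrupt-disable,
 * so NAPI and interrupts are re-enabled only at the outermost level.
 */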
  6259. intr_sem = atomic_dec_and_test(&bp->intr_sem);
  6260. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  6261. if (intr_sem) {
  6262. if (netif_running(bp->dev)) {
  6263. bnx2x_napi_enable(bp);
  6264. bnx2x_int_enable(bp);
  6265. if (bp->state == BNX2X_STATE_OPEN)
  6266. netif_tx_wake_all_queues(bp->dev);
  6267. }
  6268. }
  6269. }
  6270. static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
  6271. {
  6272. bnx2x_int_disable_sync(bp, disable_hw);
  6273. bnx2x_napi_disable(bp);
  6274. netif_tx_disable(bp->dev);
  6275. }
  6276. /*
  6277. * Init service functions
  6278. */
  6279. /**
  6280. * Sets a MAC in a CAM for a few L2 Clients for E1 chip
  6281. *
  6282. * @param bp driver descriptor
  6283. * @param set set or clear an entry (1 or 0)
  6284. * @param mac pointer to a buffer containing a MAC
  6285. * @param cl_bit_vec bit vector of clients to register a MAC for
  6286. * @param cam_offset offset in a CAM to use
  6287. * @param with_bcast set broadcast MAC as well
  6288. */
  6289. static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
  6290. u32 cl_bit_vec, u8 cam_offset,
  6291. u8 with_bcast)
  6292. {
  6293. struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
  6294. int port = BP_PORT(bp);
  6295. /* CAM allocation
  6296. * unicasts 0-31:port0 32-63:port1
  6297. * multicast 64-127:port0 128-191:port1
  6298. */
  6299. config->hdr.length = 1 + (with_bcast ? 1 : 0);
  6300. config->hdr.offset = cam_offset;
  6301. config->hdr.client_id = 0xff;
  6302. config->hdr.reserved1 = 0;
  6303. /* primary MAC */
  6304. config->config_table[0].cam_entry.msb_mac_addr =
  6305. swab16(*(u16 *)&mac[0]);
  6306. config->config_table[0].cam_entry.middle_mac_addr =
  6307. swab16(*(u16 *)&mac[2]);
  6308. config->config_table[0].cam_entry.lsb_mac_addr =
  6309. swab16(*(u16 *)&mac[4]);
  6310. config->config_table[0].cam_entry.flags = cpu_to_le16(port);
  6311. if (set)
  6312. config->config_table[0].target_table_entry.flags = 0;
  6313. else
  6314. CAM_INVALIDATE(config->config_table[0]);
  6315. config->config_table[0].target_table_entry.clients_bit_vector =
  6316. cpu_to_le32(cl_bit_vec);
  6317. config->config_table[0].target_table_entry.vlan_id = 0;
  6318. DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
  6319. (set ? "setting" : "clearing"),
  6320. config->config_table[0].cam_entry.msb_mac_addr,
  6321. config->config_table[0].cam_entry.middle_mac_addr,
  6322. config->config_table[0].cam_entry.lsb_mac_addr);
  6323. /* broadcast */
  6324. if (with_bcast) {
  6325. config->config_table[1].cam_entry.msb_mac_addr =
  6326. cpu_to_le16(0xffff);
  6327. config->config_table[1].cam_entry.middle_mac_addr =
  6328. cpu_to_le16(0xffff);
  6329. config->config_table[1].cam_entry.lsb_mac_addr =
  6330. cpu_to_le16(0xffff);
  6331. config->config_table[1].cam_entry.flags = cpu_to_le16(port);
  6332. if (set)
  6333. config->config_table[1].target_table_entry.flags =
  6334. TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
  6335. else
  6336. CAM_INVALIDATE(config->config_table[1]);
  6337. config->config_table[1].target_table_entry.clients_bit_vector =
  6338. cpu_to_le32(cl_bit_vec);
  6339. config->config_table[1].target_table_entry.vlan_id = 0;
  6340. }
  6341. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
  6342. U64_HI(bnx2x_sp_mapping(bp, mac_config)),
  6343. U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
  6344. }
  6345. /**
  6346. * Sets a MAC in a CAM for a few L2 Clients for E1H chip
  6347. *
  6348. * @param bp driver descriptor
  6349. * @param set set or clear an entry (1 or 0)
  6350. * @param mac pointer to a buffer containing a MAC
  6351. * @param cl_bit_vec bit vector of clients to register a MAC for
  6352. * @param cam_offset offset in a CAM to use
  6353. */
  6354. static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
  6355. u32 cl_bit_vec, u8 cam_offset)
  6356. {
  6357. struct mac_configuration_cmd_e1h *config =
  6358. (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
  6359. config->hdr.length = 1;
  6360. config->hdr.offset = cam_offset;
  6361. config->hdr.client_id = 0xff;
  6362. config->hdr.reserved1 = 0;
  6363. /* primary MAC */
  6364. config->config_table[0].msb_mac_addr =
  6365. swab16(*(u16 *)&mac[0]);
  6366. config->config_table[0].middle_mac_addr =
  6367. swab16(*(u16 *)&mac[2]);
  6368. config->config_table[0].lsb_mac_addr =
  6369. swab16(*(u16 *)&mac[4]);
  6370. config->config_table[0].clients_bit_vector =
  6371. cpu_to_le32(cl_bit_vec);
  6372. config->config_table[0].vlan_id = 0;
  6373. config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
  6374. if (set)
  6375. config->config_table[0].flags = BP_PORT(bp);
  6376. else
  6377. config->config_table[0].flags =
  6378. MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
  6379. DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
  6380. (set ? "setting" : "clearing"),
  6381. config->config_table[0].msb_mac_addr,
  6382. config->config_table[0].middle_mac_addr,
  6383. config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
  6384. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
  6385. U64_HI(bnx2x_sp_mapping(bp, mac_config)),
  6386. U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
  6387. }
  6388. static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
  6389. int *state_p, int poll)
  6390. {
  6391. /* can take a while if any port is running */
  6392. int cnt = 5000;
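/* 5000 iterations of msleep(1) gives roughly a five second timeout;
 * *state_p is updated asynchronously by bnx2x_sp_event() when the
 * ramrod completion arrives.
 */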
  6393. DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
  6394. poll ? "polling" : "waiting", state, idx);
  6395. might_sleep();
  6396. while (cnt--) {
  6397. if (poll) {
  6398. bnx2x_rx_int(bp->fp, 10);
  6399. /* if index is different from 0
  6400. * the reply for some commands will
  6401. * be on the non default queue
  6402. */
  6403. if (idx)
  6404. bnx2x_rx_int(&bp->fp[idx], 10);
  6405. }
  6406. mb(); /* state is changed by bnx2x_sp_event() */
  6407. if (*state_p == state) {
  6408. #ifdef BNX2X_STOP_ON_ERROR
  6409. DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
  6410. #endif
  6411. return 0;
  6412. }
  6413. msleep(1);
  6414. if (bp->panic)
  6415. return -EIO;
  6416. }
  6417. /* timeout! */
  6418. BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
  6419. poll ? "polling" : "waiting", state, idx);
  6420. #ifdef BNX2X_STOP_ON_ERROR
  6421. bnx2x_panic();
  6422. #endif
  6423. return -EBUSY;
  6424. }
  6425. static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
  6426. {
  6427. bp->set_mac_pending++;
  6428. smp_wmb();
  6429. bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
  6430. (1 << bp->fp->cl_id), BP_FUNC(bp));
  6431. /* Wait for a completion */
  6432. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
  6433. }
  6434. static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
  6435. {
  6436. bp->set_mac_pending++;
  6437. smp_wmb();
  6438. bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
  6439. (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
  6440. 1);
  6441. /* Wait for a completion */
  6442. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
  6443. }
  6444. #ifdef BCM_CNIC
  6445. /**
6446. * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6447. * MAC(s). This function will wait until the ramrod completion
  6448. * returns.
  6449. *
  6450. * @param bp driver handle
  6451. * @param set set or clear the CAM entry
  6452. *
6453. * @return 0 if success, -ENODEV if the ramrod doesn't return.
  6454. */
  6455. static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
  6456. {
  6457. u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
  6458. bp->set_mac_pending++;
  6459. smp_wmb();
  6460. /* Send a SET_MAC ramrod */
  6461. if (CHIP_IS_E1(bp))
  6462. bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
  6463. cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
  6464. 1);
  6465. else
  6466. /* CAM allocation for E1H
  6467. * unicasts: by func number
  6468. * multicast: 20+FUNC*20, 20 each
  6469. */
  6470. bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
  6471. cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
  6472. /* Wait for a completion when setting */
  6473. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
  6474. return 0;
  6475. }
  6476. #endif
  6477. static int bnx2x_setup_leading(struct bnx2x *bp)
  6478. {
  6479. int rc;
  6480. /* reset IGU state */
  6481. bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
  6482. /* SETUP ramrod */
  6483. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
  6484. /* Wait for completion */
  6485. rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
  6486. return rc;
  6487. }
  6488. static int bnx2x_setup_multi(struct bnx2x *bp, int index)
  6489. {
  6490. struct bnx2x_fastpath *fp = &bp->fp[index];
  6491. /* reset IGU state */
  6492. bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
  6493. /* SETUP ramrod */
  6494. fp->state = BNX2X_FP_STATE_OPENING;
  6495. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
  6496. fp->cl_id, 0);
  6497. /* Wait for completion */
  6498. return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
  6499. &(fp->state), 0);
  6500. }
  6501. static int bnx2x_poll(struct napi_struct *napi, int budget);
  6502. static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
  6503. {
  6504. switch (bp->multi_mode) {
  6505. case ETH_RSS_MODE_DISABLED:
  6506. bp->num_queues = 1;
  6507. break;
  6508. case ETH_RSS_MODE_REGULAR:
  6509. if (num_queues)
  6510. bp->num_queues = min_t(u32, num_queues,
  6511. BNX2X_MAX_QUEUES(bp));
  6512. else
  6513. bp->num_queues = min_t(u32, num_online_cpus(),
  6514. BNX2X_MAX_QUEUES(bp));
  6515. break;
  6516. default:
  6517. bp->num_queues = 1;
  6518. break;
  6519. }
  6520. }
  6521. static int bnx2x_set_num_queues(struct bnx2x *bp)
  6522. {
  6523. int rc = 0;
  6524. switch (int_mode) {
  6525. case INT_MODE_INTx:
  6526. case INT_MODE_MSI:
  6527. bp->num_queues = 1;
  6528. DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
  6529. break;
  6530. default:
  6531. /* Set number of queues according to bp->multi_mode value */
  6532. bnx2x_set_num_queues_msix(bp);
  6533. DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
  6534. bp->num_queues);
  6535. /* if we can't use MSI-X we only need one fp,
  6536. * so try to enable MSI-X with the requested number of fp's
  6537. * and fallback to MSI or legacy INTx with one fp
  6538. */
  6539. rc = bnx2x_enable_msix(bp);
  6540. if (rc)
  6541. /* failed to enable MSI-X */
  6542. bp->num_queues = 1;
  6543. break;
  6544. }
  6545. bp->dev->real_num_tx_queues = bp->num_queues;
  6546. return rc;
  6547. }
  6548. #ifdef BCM_CNIC
  6549. static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
  6550. static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
  6551. #endif
  6552. /* must be called with rtnl_lock */
  6553. static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
  6554. {
  6555. u32 load_code;
  6556. int i, rc;
  6557. #ifdef BNX2X_STOP_ON_ERROR
  6558. if (unlikely(bp->panic))
  6559. return -EPERM;
  6560. #endif
  6561. bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
  6562. rc = bnx2x_set_num_queues(bp);
  6563. if (bnx2x_alloc_mem(bp)) {
  6564. bnx2x_free_irq(bp, true);
  6565. return -ENOMEM;
  6566. }
  6567. for_each_queue(bp, i)
  6568. bnx2x_fp(bp, i, disable_tpa) =
  6569. ((bp->flags & TPA_ENABLE_FLAG) == 0);
  6570. for_each_queue(bp, i)
  6571. netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  6572. bnx2x_poll, 128);
  6573. bnx2x_napi_enable(bp);
  6574. if (bp->flags & USING_MSIX_FLAG) {
  6575. rc = bnx2x_req_msix_irqs(bp);
  6576. if (rc) {
  6577. bnx2x_free_irq(bp, true);
  6578. goto load_error1;
  6579. }
  6580. } else {
6581. /* Fall back to INTx if MSI-X could not be enabled due to lack of
6582. memory (in bnx2x_set_num_queues()) */
  6583. if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
  6584. bnx2x_enable_msi(bp);
  6585. bnx2x_ack_int(bp);
  6586. rc = bnx2x_req_irq(bp);
  6587. if (rc) {
  6588. BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
  6589. bnx2x_free_irq(bp, true);
  6590. goto load_error1;
  6591. }
  6592. if (bp->flags & USING_MSI_FLAG) {
  6593. bp->dev->irq = bp->pdev->irq;
  6594. netdev_info(bp->dev, "using MSI IRQ %d\n",
  6595. bp->pdev->irq);
  6596. }
  6597. }
  6598. /* Send LOAD_REQUEST command to MCP
  6599. Returns the type of LOAD command:
6600. if this is the first port to be initialized,
6601. the common blocks should be initialized as well; otherwise not
  6602. */
  6603. if (!BP_NOMCP(bp)) {
  6604. load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
  6605. if (!load_code) {
  6606. BNX2X_ERR("MCP response failure, aborting\n");
  6607. rc = -EBUSY;
  6608. goto load_error2;
  6609. }
  6610. if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
  6611. rc = -EBUSY; /* other port in diagnostic mode */
  6612. goto load_error2;
  6613. }
  6614. } else {
  6615. int port = BP_PORT(bp);
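/* Without an MCP a simple reference-count scheme picks the load type:
 * load_count[0] counts users of the whole chip and load_count[1 + port]
 * counts users of this port, so the first loader does COMMON init, the
 * first loader on a port does PORT init and everyone else does
 * FUNCTION init only.
 */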
  6616. DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
  6617. load_count[0], load_count[1], load_count[2]);
  6618. load_count[0]++;
  6619. load_count[1 + port]++;
  6620. DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
  6621. load_count[0], load_count[1], load_count[2]);
  6622. if (load_count[0] == 1)
  6623. load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
  6624. else if (load_count[1 + port] == 1)
  6625. load_code = FW_MSG_CODE_DRV_LOAD_PORT;
  6626. else
  6627. load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
  6628. }
  6629. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
  6630. (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
  6631. bp->port.pmf = 1;
  6632. else
  6633. bp->port.pmf = 0;
  6634. DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
  6635. /* Initialize HW */
  6636. rc = bnx2x_init_hw(bp, load_code);
  6637. if (rc) {
  6638. BNX2X_ERR("HW init failed, aborting\n");
  6639. bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
  6640. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
  6641. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
  6642. goto load_error2;
  6643. }
  6644. /* Setup NIC internals and enable interrupts */
  6645. bnx2x_nic_init(bp, load_code);
  6646. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
  6647. (bp->common.shmem2_base))
  6648. SHMEM2_WR(bp, dcc_support,
  6649. (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
  6650. SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
  6651. /* Send LOAD_DONE command to MCP */
  6652. if (!BP_NOMCP(bp)) {
  6653. load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
  6654. if (!load_code) {
  6655. BNX2X_ERR("MCP response failure, aborting\n");
  6656. rc = -EBUSY;
  6657. goto load_error3;
  6658. }
  6659. }
  6660. bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
  6661. rc = bnx2x_setup_leading(bp);
  6662. if (rc) {
  6663. BNX2X_ERR("Setup leading failed!\n");
  6664. #ifndef BNX2X_STOP_ON_ERROR
  6665. goto load_error3;
  6666. #else
  6667. bp->panic = 1;
  6668. return -EBUSY;
  6669. #endif
  6670. }
  6671. if (CHIP_IS_E1H(bp))
  6672. if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
  6673. DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
  6674. bp->flags |= MF_FUNC_DIS;
  6675. }
  6676. if (bp->state == BNX2X_STATE_OPEN) {
  6677. #ifdef BCM_CNIC
  6678. /* Enable Timer scan */
  6679. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
  6680. #endif
  6681. for_each_nondefault_queue(bp, i) {
  6682. rc = bnx2x_setup_multi(bp, i);
  6683. if (rc)
  6684. #ifdef BCM_CNIC
  6685. goto load_error4;
  6686. #else
  6687. goto load_error3;
  6688. #endif
  6689. }
  6690. if (CHIP_IS_E1(bp))
  6691. bnx2x_set_eth_mac_addr_e1(bp, 1);
  6692. else
  6693. bnx2x_set_eth_mac_addr_e1h(bp, 1);
  6694. #ifdef BCM_CNIC
  6695. /* Set iSCSI L2 MAC */
  6696. mutex_lock(&bp->cnic_mutex);
  6697. if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
  6698. bnx2x_set_iscsi_eth_mac_addr(bp, 1);
  6699. bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
  6700. bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
  6701. CNIC_SB_ID(bp));
  6702. }
  6703. mutex_unlock(&bp->cnic_mutex);
  6704. #endif
  6705. }
  6706. if (bp->port.pmf)
  6707. bnx2x_initial_phy_init(bp, load_mode);
  6708. /* Start fast path */
  6709. switch (load_mode) {
  6710. case LOAD_NORMAL:
  6711. if (bp->state == BNX2X_STATE_OPEN) {
  6712. /* Tx queue should be only reenabled */
  6713. netif_tx_wake_all_queues(bp->dev);
  6714. }
  6715. /* Initialize the receive filter. */
  6716. bnx2x_set_rx_mode(bp->dev);
  6717. break;
  6718. case LOAD_OPEN:
  6719. netif_tx_start_all_queues(bp->dev);
  6720. if (bp->state != BNX2X_STATE_OPEN)
  6721. netif_tx_disable(bp->dev);
  6722. /* Initialize the receive filter. */
  6723. bnx2x_set_rx_mode(bp->dev);
  6724. break;
  6725. case LOAD_DIAG:
  6726. /* Initialize the receive filter. */
  6727. bnx2x_set_rx_mode(bp->dev);
  6728. bp->state = BNX2X_STATE_DIAG;
  6729. break;
  6730. default:
  6731. break;
  6732. }
  6733. if (!bp->port.pmf)
  6734. bnx2x__link_status_update(bp);
  6735. /* start the timer */
  6736. mod_timer(&bp->timer, jiffies + bp->current_interval);
  6737. #ifdef BCM_CNIC
  6738. bnx2x_setup_cnic_irq_info(bp);
  6739. if (bp->state == BNX2X_STATE_OPEN)
  6740. bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
  6741. #endif
  6742. bnx2x_inc_load_cnt(bp);
  6743. return 0;
  6744. #ifdef BCM_CNIC
  6745. load_error4:
  6746. /* Disable Timer scan */
  6747. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
  6748. #endif
  6749. load_error3:
  6750. bnx2x_int_disable_sync(bp, 1);
  6751. if (!BP_NOMCP(bp)) {
  6752. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
  6753. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
  6754. }
  6755. bp->port.pmf = 0;
  6756. /* Free SKBs, SGEs, TPA pool and driver internals */
  6757. bnx2x_free_skbs(bp);
  6758. for_each_queue(bp, i)
  6759. bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
  6760. load_error2:
  6761. /* Release IRQs */
  6762. bnx2x_free_irq(bp, false);
  6763. load_error1:
  6764. bnx2x_napi_disable(bp);
  6765. for_each_queue(bp, i)
  6766. netif_napi_del(&bnx2x_fp(bp, i, napi));
  6767. bnx2x_free_mem(bp);
  6768. return rc;
  6769. }
  6770. static int bnx2x_stop_multi(struct bnx2x *bp, int index)
  6771. {
  6772. struct bnx2x_fastpath *fp = &bp->fp[index];
  6773. int rc;
  6774. /* halt the connection */
  6775. fp->state = BNX2X_FP_STATE_HALTING;
  6776. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
  6777. /* Wait for completion */
  6778. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
  6779. &(fp->state), 1);
  6780. if (rc) /* timeout */
  6781. return rc;
  6782. /* delete cfc entry */
  6783. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
  6784. /* Wait for completion */
  6785. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
  6786. &(fp->state), 1);
  6787. return rc;
  6788. }
  6789. static int bnx2x_stop_leading(struct bnx2x *bp)
  6790. {
  6791. __le16 dsb_sp_prod_idx;
  6792. /* if the other port is handling traffic,
  6793. this can take a lot of time */
  6794. int cnt = 500;
  6795. int rc;
  6796. might_sleep();
  6797. /* Send HALT ramrod */
  6798. bp->fp[0].state = BNX2X_FP_STATE_HALTING;
  6799. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
  6800. /* Wait for completion */
  6801. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
  6802. &(bp->fp[0].state), 1);
  6803. if (rc) /* timeout */
  6804. return rc;
  6805. dsb_sp_prod_idx = *bp->dsb_sp_prod;
  6806. /* Send PORT_DELETE ramrod */
  6807. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
  6808. /* Wait for completion to arrive on default status block
  6809. we are going to reset the chip anyway
  6810. so there is not much to do if this times out
  6811. */
  6812. while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
  6813. if (!cnt) {
  6814. DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
  6815. "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
  6816. *bp->dsb_sp_prod, dsb_sp_prod_idx);
  6817. #ifdef BNX2X_STOP_ON_ERROR
  6818. bnx2x_panic();
  6819. #endif
  6820. rc = -EBUSY;
  6821. break;
  6822. }
  6823. cnt--;
  6824. msleep(1);
  6825. rmb(); /* Refresh the dsb_sp_prod */
  6826. }
  6827. bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
  6828. bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
  6829. return rc;
  6830. }
  6831. static void bnx2x_reset_func(struct bnx2x *bp)
  6832. {
  6833. int port = BP_PORT(bp);
  6834. int func = BP_FUNC(bp);
  6835. int base, i;
  6836. /* Configure IGU */
  6837. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  6838. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  6839. #ifdef BCM_CNIC
  6840. /* Disable Timer scan */
  6841. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
  6842. /*
  6843. * Wait for at least 10ms and up to 2 second for the timers scan to
  6844. * complete
  6845. */
  6846. for (i = 0; i < 200; i++) {
  6847. msleep(10);
  6848. if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
  6849. break;
  6850. }
  6851. #endif
  6852. /* Clear ILT */
  6853. base = FUNC_ILT_BASE(func);
  6854. for (i = base; i < base + ILT_PER_FUNC; i++)
  6855. bnx2x_ilt_wr(bp, i, 0);
  6856. }
  6857. static void bnx2x_reset_port(struct bnx2x *bp)
  6858. {
  6859. int port = BP_PORT(bp);
  6860. u32 val;
  6861. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  6862. /* Do not rcv packets to BRB */
  6863. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
  6864. /* Do not direct rcv packets that are not for MCP to the BRB */
  6865. REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
  6866. NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
  6867. /* Configure AEU */
  6868. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
  6869. msleep(100);
  6870. /* Check for BRB port occupancy */
  6871. val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
  6872. if (val)
  6873. DP(NETIF_MSG_IFDOWN,
  6874. "BRB1 is not empty %d blocks are occupied\n", val);
  6875. /* TODO: Close Doorbell port? */
  6876. }
  6877. static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
  6878. {
  6879. DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
  6880. BP_FUNC(bp), reset_code);
  6881. switch (reset_code) {
  6882. case FW_MSG_CODE_DRV_UNLOAD_COMMON:
  6883. bnx2x_reset_port(bp);
  6884. bnx2x_reset_func(bp);
  6885. bnx2x_reset_common(bp);
  6886. break;
  6887. case FW_MSG_CODE_DRV_UNLOAD_PORT:
  6888. bnx2x_reset_port(bp);
  6889. bnx2x_reset_func(bp);
  6890. break;
  6891. case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
  6892. bnx2x_reset_func(bp);
  6893. break;
  6894. default:
  6895. BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
  6896. break;
  6897. }
  6898. }
  6899. static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
  6900. {
  6901. int port = BP_PORT(bp);
  6902. u32 reset_code = 0;
  6903. int i, cnt, rc;
  6904. /* Wait until tx fastpath tasks complete */
  6905. for_each_queue(bp, i) {
  6906. struct bnx2x_fastpath *fp = &bp->fp[i];
  6907. cnt = 1000;
  6908. while (bnx2x_has_tx_work_unload(fp)) {
  6909. bnx2x_tx_int(fp);
  6910. if (!cnt) {
  6911. BNX2X_ERR("timeout waiting for queue[%d]\n",
  6912. i);
  6913. #ifdef BNX2X_STOP_ON_ERROR
  6914. bnx2x_panic();
6915. return;
  6916. #else
  6917. break;
  6918. #endif
  6919. }
  6920. cnt--;
  6921. msleep(1);
  6922. }
  6923. }
  6924. /* Give HW time to discard old tx messages */
  6925. msleep(1);
  6926. if (CHIP_IS_E1(bp)) {
  6927. struct mac_configuration_cmd *config =
  6928. bnx2x_sp(bp, mcast_config);
  6929. bnx2x_set_eth_mac_addr_e1(bp, 0);
  6930. for (i = 0; i < config->hdr.length; i++)
  6931. CAM_INVALIDATE(config->config_table[i]);
  6932. config->hdr.length = i;
  6933. if (CHIP_REV_IS_SLOW(bp))
  6934. config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
  6935. else
  6936. config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
  6937. config->hdr.client_id = bp->fp->cl_id;
  6938. config->hdr.reserved1 = 0;
  6939. bp->set_mac_pending++;
  6940. smp_wmb();
  6941. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
  6942. U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
  6943. U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
  6944. } else { /* E1H */
  6945. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  6946. bnx2x_set_eth_mac_addr_e1h(bp, 0);
  6947. for (i = 0; i < MC_HASH_SIZE; i++)
  6948. REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
  6949. REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
  6950. }
  6951. #ifdef BCM_CNIC
  6952. /* Clear iSCSI L2 MAC */
  6953. mutex_lock(&bp->cnic_mutex);
  6954. if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
  6955. bnx2x_set_iscsi_eth_mac_addr(bp, 0);
  6956. bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
  6957. }
  6958. mutex_unlock(&bp->cnic_mutex);
  6959. #endif
  6960. if (unload_mode == UNLOAD_NORMAL)
  6961. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6962. else if (bp->flags & NO_WOL_FLAG)
  6963. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
  6964. else if (bp->wol) {
  6965. u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
  6966. u8 *mac_addr = bp->dev->dev_addr;
  6967. u32 val;
  6968. /* The mac address is written to entries 1-4 to
  6969. preserve entry 0 which is used by the PMF */
  6970. u8 entry = (BP_E1HVN(bp) + 1)*8;
  6971. val = (mac_addr[0] << 8) | mac_addr[1];
  6972. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
  6973. val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
  6974. (mac_addr[4] << 8) | mac_addr[5];
  6975. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
  6976. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
  6977. } else
  6978. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6979. /* Close multi and leading connections
  6980. Completions for ramrods are collected in a synchronous way */
  6981. for_each_nondefault_queue(bp, i)
  6982. if (bnx2x_stop_multi(bp, i))
  6983. goto unload_error;
  6984. rc = bnx2x_stop_leading(bp);
  6985. if (rc) {
  6986. BNX2X_ERR("Stop leading failed!\n");
  6987. #ifdef BNX2X_STOP_ON_ERROR
  6988. return -EBUSY;
  6989. #else
  6990. goto unload_error;
  6991. #endif
  6992. }
  6993. unload_error:
  6994. if (!BP_NOMCP(bp))
  6995. reset_code = bnx2x_fw_command(bp, reset_code);
  6996. else {
  6997. DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
  6998. load_count[0], load_count[1], load_count[2]);
  6999. load_count[0]--;
  7000. load_count[1 + port]--;
  7001. DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
  7002. load_count[0], load_count[1], load_count[2]);
  7003. if (load_count[0] == 0)
  7004. reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
  7005. else if (load_count[1 + port] == 0)
  7006. reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
  7007. else
  7008. reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
  7009. }
  7010. if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
  7011. (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
  7012. bnx2x__link_reset(bp);
  7013. /* Reset the chip */
  7014. bnx2x_reset_chip(bp, reset_code);
  7015. /* Report UNLOAD_DONE to MCP */
  7016. if (!BP_NOMCP(bp))
  7017. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
  7018. }
  7019. static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
  7020. {
  7021. u32 val;
  7022. DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
  7023. if (CHIP_IS_E1(bp)) {
  7024. int port = BP_PORT(bp);
  7025. u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  7026. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  7027. val = REG_RD(bp, addr);
  7028. val &= ~(0x300);
  7029. REG_WR(bp, addr, val);
  7030. } else if (CHIP_IS_E1H(bp)) {
  7031. val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
  7032. val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
  7033. MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
  7034. REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
  7035. }
  7036. }
  7037. /* must be called with rtnl_lock */
  7038. static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
  7039. {
  7040. int i;
  7041. if (bp->state == BNX2X_STATE_CLOSED) {
  7042. /* Interface has been removed - nothing to recover */
  7043. bp->recovery_state = BNX2X_RECOVERY_DONE;
  7044. bp->is_leader = 0;
  7045. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
  7046. smp_wmb();
  7047. return -EINVAL;
  7048. }
  7049. #ifdef BCM_CNIC
  7050. bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
  7051. #endif
  7052. bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
  7053. /* Set "drop all" */
  7054. bp->rx_mode = BNX2X_RX_MODE_NONE;
  7055. bnx2x_set_storm_rx_mode(bp);
  7056. /* Disable HW interrupts, NAPI and Tx */
  7057. bnx2x_netif_stop(bp, 1);
  7058. netif_carrier_off(bp->dev);
  7059. del_timer_sync(&bp->timer);
  7060. SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
  7061. (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
  7062. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  7063. /* Release IRQs */
  7064. bnx2x_free_irq(bp, false);
  7065. /* Cleanup the chip if needed */
  7066. if (unload_mode != UNLOAD_RECOVERY)
  7067. bnx2x_chip_cleanup(bp, unload_mode);
  7068. bp->port.pmf = 0;
  7069. /* Free SKBs, SGEs, TPA pool and driver internals */
  7070. bnx2x_free_skbs(bp);
  7071. for_each_queue(bp, i)
  7072. bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
  7073. for_each_queue(bp, i)
  7074. netif_napi_del(&bnx2x_fp(bp, i, napi));
  7075. bnx2x_free_mem(bp);
  7076. bp->state = BNX2X_STATE_CLOSED;
  7077. /* The last driver must disable a "close the gate" if there is no
  7078. * parity attention or "process kill" pending.
  7079. */
  7080. if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
  7081. bnx2x_reset_is_done(bp))
  7082. bnx2x_disable_close_the_gate(bp);
7083. /* Reset the MCP mailbox sequence if there is an ongoing recovery */
  7084. if (unload_mode == UNLOAD_RECOVERY)
  7085. bp->fw_seq = 0;
  7086. return 0;
  7087. }
  7088. /* Close gates #2, #3 and #4: */
  7089. static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
  7090. {
  7091. u32 val, addr;
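/* Going by the register names: gate #4 discards host doorbells and
 * gate #2 discards host internal writes (PXP registers, skipped on E1),
 * while gate #3 toggles bit 0 of this port's HC_REG_CONFIG.
 */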
  7092. /* Gates #2 and #4a are closed/opened for "not E1" only */
  7093. if (!CHIP_IS_E1(bp)) {
  7094. /* #4 */
  7095. val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
  7096. REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
  7097. close ? (val | 0x1) : (val & (~(u32)1)));
  7098. /* #2 */
  7099. val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
  7100. REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
  7101. close ? (val | 0x1) : (val & (~(u32)1)));
  7102. }
  7103. /* #3 */
  7104. addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  7105. val = REG_RD(bp, addr);
  7106. REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
  7107. DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
  7108. close ? "closing" : "opening");
  7109. mmiowb();
  7110. }
  7111. #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
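/* The `magic' bit is set in the CLP mailbox by bnx2x_clp_reset_prep()
 * before the MCP is reset and restored afterwards by
 * bnx2x_clp_reset_done(), so that the multi-function configuration
 * survives the reset.
 */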
  7112. static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
  7113. {
  7114. /* Do some magic... */
  7115. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  7116. *magic_val = val & SHARED_MF_CLP_MAGIC;
  7117. MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
  7118. }
  7119. /* Restore the value of the `magic' bit.
  7120. *
7121. * @param bp Driver handle.
  7122. * @param magic_val Old value of the `magic' bit.
  7123. */
  7124. static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
  7125. {
  7126. /* Restore the `magic' bit value... */
  7127. /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
  7128. SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
  7129. (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
  7130. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  7131. MF_CFG_WR(bp, shared_mf_config.clp_mb,
  7132. (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
  7133. }
  7134. /* Prepares for MCP reset: takes care of CLP configurations.
  7135. *
  7136. * @param bp
  7137. * @param magic_val Old value of 'magic' bit.
  7138. */
  7139. static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
  7140. {
  7141. u32 shmem;
  7142. u32 validity_offset;
  7143. DP(NETIF_MSG_HW, "Starting\n");
  7144. /* Set `magic' bit in order to save MF config */
  7145. if (!CHIP_IS_E1(bp))
  7146. bnx2x_clp_reset_prep(bp, magic_val);
  7147. /* Get shmem offset */
  7148. shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  7149. validity_offset = offsetof(struct shmem_region, validity_map[0]);
  7150. /* Clear validity map flags */
  7151. if (shmem > 0)
  7152. REG_WR(bp, shmem + validity_offset, 0);
  7153. }
  7154. #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
  7155. #define MCP_ONE_TIMEOUT 100 /* 100 ms */
  7156. /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
  7157. * depending on the HW type.
  7158. *
  7159. * @param bp
  7160. */
  7161. static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
  7162. {
  7163. /* special handling for emulation and FPGA,
  7164. wait 10 times longer */
  7165. if (CHIP_REV_IS_SLOW(bp))
  7166. msleep(MCP_ONE_TIMEOUT*10);
  7167. else
  7168. msleep(MCP_ONE_TIMEOUT);
  7169. }
  7170. static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
  7171. {
  7172. u32 shmem, cnt, validity_offset, val;
  7173. int rc = 0;
  7174. msleep(100);
  7175. /* Get shmem offset */
  7176. shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  7177. if (shmem == 0) {
  7178. BNX2X_ERR("Shmem 0 return failure\n");
  7179. rc = -ENOTTY;
  7180. goto exit_lbl;
  7181. }
  7182. validity_offset = offsetof(struct shmem_region, validity_map[0]);
  7183. /* Wait for MCP to come up */
  7184. for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7185. /* TBD: it's best to check the validity map of the last port;
7186. * currently this checks port 0.
  7187. */
  7188. val = REG_RD(bp, shmem + validity_offset);
  7189. DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
  7190. shmem + validity_offset, val);
  7191. /* check that shared memory is valid. */
  7192. if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  7193. == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
  7194. break;
  7195. bnx2x_mcp_wait_one(bp);
  7196. }
  7197. DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
  7198. /* Check that shared memory is valid. This indicates that MCP is up. */
  7199. if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
  7200. (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
  7201. BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
  7202. rc = -ENOTTY;
  7203. goto exit_lbl;
  7204. }
  7205. exit_lbl:
  7206. /* Restore the `magic' bit value */
  7207. if (!CHIP_IS_E1(bp))
  7208. bnx2x_clp_reset_done(bp, magic_val);
  7209. return rc;
  7210. }
  7211. static void bnx2x_pxp_prep(struct bnx2x *bp)
  7212. {
  7213. if (!CHIP_IS_E1(bp)) {
  7214. REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
  7215. REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
  7216. REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
  7217. mmiowb();
  7218. }
  7219. }
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

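/* "Process kill" flow: drain the PXP Tetris buffer, close gates #2-#4,
 * prepare the MCP and PXP blocks, assert the global chip reset, then wait
 * for the MCP to come back before reopening the gates.  Returns 0 on
 * success or -EAGAIN if the HW did not reach the expected state in time.
 */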
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare for the chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

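/* Executed by the recovery leader: run the "process kill" sequence and, on
 * success, mark the global reset as done and move the driver to
 * BNX2X_RECOVERY_DONE.  The leadership lock is released in any case.
 */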
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
					HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all functions
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released the leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

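/* On E1H, interrupts are disabled through the PGL "pretend" register:
 * temporarily pretend to be function 0 (the "like-E1" mode), disable
 * interrupts, then restore the original function in the pretend register.
 */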
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

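/* If a pre-boot (UNDI) driver left the device initialized, request an
 * unload from the MCP for both ports, quiesce input traffic, reset the
 * device and restore the NIG port-swap configuration before this driver
 * takes over.
 */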
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

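/* Read the port-independent HW information: chip id, flash size, shmem
 * bases, bootcode (BC) version, WoL capability and the board part number.
 * Sets NO_MCP_FLAG if the shared memory base shows the MCP is not active.
 */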
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

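/* Translate the NVRAM switch/external-PHY configuration into the ethtool
 * SUPPORTED_* capability mask for this port, then trim it according to
 * speed_cap_mask.
 */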
  7689. static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
  7690. u32 switch_cfg)
  7691. {
  7692. int port = BP_PORT(bp);
  7693. u32 ext_phy_type;
  7694. switch (switch_cfg) {
  7695. case SWITCH_CFG_1G:
  7696. BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
  7697. ext_phy_type =
  7698. SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
  7699. switch (ext_phy_type) {
  7700. case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
  7701. BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
  7702. ext_phy_type);
  7703. bp->port.supported |= (SUPPORTED_10baseT_Half |
  7704. SUPPORTED_10baseT_Full |
  7705. SUPPORTED_100baseT_Half |
  7706. SUPPORTED_100baseT_Full |
  7707. SUPPORTED_1000baseT_Full |
  7708. SUPPORTED_2500baseX_Full |
  7709. SUPPORTED_TP |
  7710. SUPPORTED_FIBRE |
  7711. SUPPORTED_Autoneg |
  7712. SUPPORTED_Pause |
  7713. SUPPORTED_Asym_Pause);
  7714. break;
  7715. case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
  7716. BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
  7717. ext_phy_type);
  7718. bp->port.supported |= (SUPPORTED_10baseT_Half |
  7719. SUPPORTED_10baseT_Full |
  7720. SUPPORTED_100baseT_Half |
  7721. SUPPORTED_100baseT_Full |
  7722. SUPPORTED_1000baseT_Full |
  7723. SUPPORTED_TP |
  7724. SUPPORTED_FIBRE |
  7725. SUPPORTED_Autoneg |
  7726. SUPPORTED_Pause |
  7727. SUPPORTED_Asym_Pause);
  7728. break;
  7729. default:
  7730. BNX2X_ERR("NVRAM config error. "
  7731. "BAD SerDes ext_phy_config 0x%x\n",
  7732. bp->link_params.ext_phy_config);
  7733. return;
  7734. }
  7735. bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
  7736. port*0x10);
  7737. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  7738. break;
  7739. case SWITCH_CFG_10G:
  7740. BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
  7741. ext_phy_type =
  7742. XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
  7743. switch (ext_phy_type) {
  7744. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
  7745. BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
  7746. ext_phy_type);
  7747. bp->port.supported |= (SUPPORTED_10baseT_Half |
  7748. SUPPORTED_10baseT_Full |
  7749. SUPPORTED_100baseT_Half |
  7750. SUPPORTED_100baseT_Full |
  7751. SUPPORTED_1000baseT_Full |
  7752. SUPPORTED_2500baseX_Full |
  7753. SUPPORTED_10000baseT_Full |
  7754. SUPPORTED_TP |
  7755. SUPPORTED_FIBRE |
  7756. SUPPORTED_Autoneg |
  7757. SUPPORTED_Pause |
  7758. SUPPORTED_Asym_Pause);
  7759. break;
  7760. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
  7761. BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
  7762. ext_phy_type);
  7763. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7764. SUPPORTED_1000baseT_Full |
  7765. SUPPORTED_FIBRE |
  7766. SUPPORTED_Autoneg |
  7767. SUPPORTED_Pause |
  7768. SUPPORTED_Asym_Pause);
  7769. break;
  7770. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
  7771. BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
  7772. ext_phy_type);
  7773. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7774. SUPPORTED_2500baseX_Full |
  7775. SUPPORTED_1000baseT_Full |
  7776. SUPPORTED_FIBRE |
  7777. SUPPORTED_Autoneg |
  7778. SUPPORTED_Pause |
  7779. SUPPORTED_Asym_Pause);
  7780. break;
  7781. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
  7782. BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
  7783. ext_phy_type);
  7784. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7785. SUPPORTED_FIBRE |
  7786. SUPPORTED_Pause |
  7787. SUPPORTED_Asym_Pause);
  7788. break;
  7789. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
  7790. BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
  7791. ext_phy_type);
  7792. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7793. SUPPORTED_1000baseT_Full |
  7794. SUPPORTED_FIBRE |
  7795. SUPPORTED_Pause |
  7796. SUPPORTED_Asym_Pause);
  7797. break;
  7798. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
  7799. BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
  7800. ext_phy_type);
  7801. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7802. SUPPORTED_1000baseT_Full |
  7803. SUPPORTED_Autoneg |
  7804. SUPPORTED_FIBRE |
  7805. SUPPORTED_Pause |
  7806. SUPPORTED_Asym_Pause);
  7807. break;
  7808. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
  7809. BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
  7810. ext_phy_type);
  7811. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7812. SUPPORTED_1000baseT_Full |
  7813. SUPPORTED_Autoneg |
  7814. SUPPORTED_FIBRE |
  7815. SUPPORTED_Pause |
  7816. SUPPORTED_Asym_Pause);
  7817. break;
  7818. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
  7819. BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
  7820. ext_phy_type);
  7821. bp->port.supported |= (SUPPORTED_10000baseT_Full |
  7822. SUPPORTED_TP |
  7823. SUPPORTED_Autoneg |
  7824. SUPPORTED_Pause |
  7825. SUPPORTED_Asym_Pause);
  7826. break;
  7827. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
  7828. BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
  7829. ext_phy_type);
  7830. bp->port.supported |= (SUPPORTED_10baseT_Half |
  7831. SUPPORTED_10baseT_Full |
  7832. SUPPORTED_100baseT_Half |
  7833. SUPPORTED_100baseT_Full |
  7834. SUPPORTED_1000baseT_Full |
  7835. SUPPORTED_10000baseT_Full |
  7836. SUPPORTED_TP |
  7837. SUPPORTED_Autoneg |
  7838. SUPPORTED_Pause |
  7839. SUPPORTED_Asym_Pause);
  7840. break;
  7841. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
  7842. BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
  7843. bp->link_params.ext_phy_config);
  7844. break;
  7845. default:
  7846. BNX2X_ERR("NVRAM config error. "
  7847. "BAD XGXS ext_phy_config 0x%x\n",
  7848. bp->link_params.ext_phy_config);
  7849. return;
  7850. }
  7851. bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
  7852. port*0x18);
  7853. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  7854. break;
  7855. default:
  7856. BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
  7857. bp->port.link_config);
  7858. return;
  7859. }
  7860. bp->link_params.phy_addr = bp->port.phy_addr;
  7861. /* mask what we support according to speed_cap_mask */
  7862. if (!(bp->link_params.speed_cap_mask &
  7863. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
  7864. bp->port.supported &= ~SUPPORTED_10baseT_Half;
  7865. if (!(bp->link_params.speed_cap_mask &
  7866. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
  7867. bp->port.supported &= ~SUPPORTED_10baseT_Full;
  7868. if (!(bp->link_params.speed_cap_mask &
  7869. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
  7870. bp->port.supported &= ~SUPPORTED_100baseT_Half;
  7871. if (!(bp->link_params.speed_cap_mask &
  7872. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
  7873. bp->port.supported &= ~SUPPORTED_100baseT_Full;
  7874. if (!(bp->link_params.speed_cap_mask &
  7875. PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
  7876. bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
  7877. SUPPORTED_1000baseT_Full);
  7878. if (!(bp->link_params.speed_cap_mask &
  7879. PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
  7880. bp->port.supported &= ~SUPPORTED_2500baseX_Full;
  7881. if (!(bp->link_params.speed_cap_mask &
  7882. PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
  7883. bp->port.supported &= ~SUPPORTED_10000baseT_Full;
  7884. BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
  7885. }
  7886. static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
  7887. {
  7888. bp->link_params.req_duplex = DUPLEX_FULL;
  7889. switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
  7890. case PORT_FEATURE_LINK_SPEED_AUTO:
  7891. if (bp->port.supported & SUPPORTED_Autoneg) {
  7892. bp->link_params.req_line_speed = SPEED_AUTO_NEG;
  7893. bp->port.advertising = bp->port.supported;
  7894. } else {
  7895. u32 ext_phy_type =
  7896. XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
  7897. if ((ext_phy_type ==
  7898. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
  7899. (ext_phy_type ==
  7900. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
  7901. /* force 10G, no AN */
  7902. bp->link_params.req_line_speed = SPEED_10000;
  7903. bp->port.advertising =
  7904. (ADVERTISED_10000baseT_Full |
  7905. ADVERTISED_FIBRE);
  7906. break;
  7907. }
  7908. BNX2X_ERR("NVRAM config error. "
  7909. "Invalid link_config 0x%x"
  7910. " Autoneg not supported\n",
  7911. bp->port.link_config);
  7912. return;
  7913. }
  7914. break;
  7915. case PORT_FEATURE_LINK_SPEED_10M_FULL:
  7916. if (bp->port.supported & SUPPORTED_10baseT_Full) {
  7917. bp->link_params.req_line_speed = SPEED_10;
  7918. bp->port.advertising = (ADVERTISED_10baseT_Full |
  7919. ADVERTISED_TP);
  7920. } else {
  7921. BNX2X_ERROR("NVRAM config error. "
  7922. "Invalid link_config 0x%x"
  7923. " speed_cap_mask 0x%x\n",
  7924. bp->port.link_config,
  7925. bp->link_params.speed_cap_mask);
  7926. return;
  7927. }
  7928. break;
  7929. case PORT_FEATURE_LINK_SPEED_10M_HALF:
  7930. if (bp->port.supported & SUPPORTED_10baseT_Half) {
  7931. bp->link_params.req_line_speed = SPEED_10;
  7932. bp->link_params.req_duplex = DUPLEX_HALF;
  7933. bp->port.advertising = (ADVERTISED_10baseT_Half |
  7934. ADVERTISED_TP);
  7935. } else {
  7936. BNX2X_ERROR("NVRAM config error. "
  7937. "Invalid link_config 0x%x"
  7938. " speed_cap_mask 0x%x\n",
  7939. bp->port.link_config,
  7940. bp->link_params.speed_cap_mask);
  7941. return;
  7942. }
  7943. break;
  7944. case PORT_FEATURE_LINK_SPEED_100M_FULL:
  7945. if (bp->port.supported & SUPPORTED_100baseT_Full) {
  7946. bp->link_params.req_line_speed = SPEED_100;
  7947. bp->port.advertising = (ADVERTISED_100baseT_Full |
  7948. ADVERTISED_TP);
  7949. } else {
  7950. BNX2X_ERROR("NVRAM config error. "
  7951. "Invalid link_config 0x%x"
  7952. " speed_cap_mask 0x%x\n",
  7953. bp->port.link_config,
  7954. bp->link_params.speed_cap_mask);
  7955. return;
  7956. }
  7957. break;
  7958. case PORT_FEATURE_LINK_SPEED_100M_HALF:
  7959. if (bp->port.supported & SUPPORTED_100baseT_Half) {
  7960. bp->link_params.req_line_speed = SPEED_100;
  7961. bp->link_params.req_duplex = DUPLEX_HALF;
  7962. bp->port.advertising = (ADVERTISED_100baseT_Half |
  7963. ADVERTISED_TP);
  7964. } else {
  7965. BNX2X_ERROR("NVRAM config error. "
  7966. "Invalid link_config 0x%x"
  7967. " speed_cap_mask 0x%x\n",
  7968. bp->port.link_config,
  7969. bp->link_params.speed_cap_mask);
  7970. return;
  7971. }
  7972. break;
  7973. case PORT_FEATURE_LINK_SPEED_1G:
  7974. if (bp->port.supported & SUPPORTED_1000baseT_Full) {
  7975. bp->link_params.req_line_speed = SPEED_1000;
  7976. bp->port.advertising = (ADVERTISED_1000baseT_Full |
  7977. ADVERTISED_TP);
  7978. } else {
  7979. BNX2X_ERROR("NVRAM config error. "
  7980. "Invalid link_config 0x%x"
  7981. " speed_cap_mask 0x%x\n",
  7982. bp->port.link_config,
  7983. bp->link_params.speed_cap_mask);
  7984. return;
  7985. }
  7986. break;
  7987. case PORT_FEATURE_LINK_SPEED_2_5G:
  7988. if (bp->port.supported & SUPPORTED_2500baseX_Full) {
  7989. bp->link_params.req_line_speed = SPEED_2500;
  7990. bp->port.advertising = (ADVERTISED_2500baseX_Full |
  7991. ADVERTISED_TP);
  7992. } else {
  7993. BNX2X_ERROR("NVRAM config error. "
  7994. "Invalid link_config 0x%x"
  7995. " speed_cap_mask 0x%x\n",
  7996. bp->port.link_config,
  7997. bp->link_params.speed_cap_mask);
  7998. return;
  7999. }
  8000. break;
  8001. case PORT_FEATURE_LINK_SPEED_10G_CX4:
  8002. case PORT_FEATURE_LINK_SPEED_10G_KX4:
  8003. case PORT_FEATURE_LINK_SPEED_10G_KR:
  8004. if (bp->port.supported & SUPPORTED_10000baseT_Full) {
  8005. bp->link_params.req_line_speed = SPEED_10000;
  8006. bp->port.advertising = (ADVERTISED_10000baseT_Full |
  8007. ADVERTISED_FIBRE);
  8008. } else {
  8009. BNX2X_ERROR("NVRAM config error. "
  8010. "Invalid link_config 0x%x"
  8011. " speed_cap_mask 0x%x\n",
  8012. bp->port.link_config,
  8013. bp->link_params.speed_cap_mask);
  8014. return;
  8015. }
  8016. break;
  8017. default:
  8018. BNX2X_ERROR("NVRAM config error. "
  8019. "BAD link speed link_config 0x%x\n",
  8020. bp->port.link_config);
  8021. bp->link_params.req_line_speed = SPEED_AUTO_NEG;
  8022. bp->port.advertising = bp->port.supported;
  8023. break;
  8024. }
  8025. bp->link_params.req_flow_ctrl = (bp->port.link_config &
  8026. PORT_FEATURE_FLOW_CONTROL_MASK);
  8027. if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
  8028. !(bp->port.supported & SUPPORTED_Autoneg))
  8029. bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
  8030. BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
  8031. " advertising 0x%x\n",
  8032. bp->link_params.req_line_speed,
  8033. bp->link_params.req_duplex,
  8034. bp->link_params.req_flow_ctrl, bp->port.advertising);
  8035. }
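/* The MAC address is stored in shmem as a 16-bit "upper" and a 32-bit
 * "lower" word; convert both to big-endian and lay them out as the 6-byte
 * address expected by the networking core.
 */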
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

  8043. static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
  8044. {
  8045. int port = BP_PORT(bp);
  8046. u32 val, val2;
  8047. u32 config;
  8048. u16 i;
  8049. u32 ext_phy_type;
  8050. bp->link_params.bp = bp;
  8051. bp->link_params.port = port;
  8052. bp->link_params.lane_config =
  8053. SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
  8054. bp->link_params.ext_phy_config =
  8055. SHMEM_RD(bp,
  8056. dev_info.port_hw_config[port].external_phy_config);
  8057. /* BCM8727_NOC => BCM8727 no over current */
  8058. if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
  8059. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
  8060. bp->link_params.ext_phy_config &=
  8061. ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
  8062. bp->link_params.ext_phy_config |=
  8063. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
  8064. bp->link_params.feature_config_flags |=
  8065. FEATURE_CONFIG_BCM8727_NOC;
  8066. }
  8067. bp->link_params.speed_cap_mask =
  8068. SHMEM_RD(bp,
  8069. dev_info.port_hw_config[port].speed_capability_mask);
  8070. bp->port.link_config =
  8071. SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
  8072. /* Get the 4 lanes xgxs config rx and tx */
  8073. for (i = 0; i < 2; i++) {
  8074. val = SHMEM_RD(bp,
  8075. dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
  8076. bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
  8077. bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
  8078. val = SHMEM_RD(bp,
  8079. dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
  8080. bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
  8081. bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
  8082. }
  8083. /* If the device is capable of WoL, set the default state according
  8084. * to the HW
  8085. */
  8086. config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
  8087. bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
  8088. (config & PORT_FEATURE_WOL_ENABLED));
  8089. BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
  8090. " speed_cap_mask 0x%08x link_config 0x%08x\n",
  8091. bp->link_params.lane_config,
  8092. bp->link_params.ext_phy_config,
  8093. bp->link_params.speed_cap_mask, bp->port.link_config);
  8094. bp->link_params.switch_cfg |= (bp->port.link_config &
  8095. PORT_FEATURE_CONNECTED_SWITCH_MASK);
  8096. bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
  8097. bnx2x_link_settings_requested(bp);
  8098. /*
  8099. * If connected directly, work with the internal PHY, otherwise, work
  8100. * with the external PHY
  8101. */
  8102. ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
  8103. if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
  8104. bp->mdio.prtad = bp->link_params.phy_addr;
  8105. else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
  8106. (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
  8107. bp->mdio.prtad =
  8108. XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
  8109. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
  8110. val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
  8111. bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  8112. memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
  8113. memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  8114. #ifdef BCM_CNIC
  8115. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
  8116. val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
  8117. bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
  8118. #endif
  8119. }
  8120. static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
  8121. {
  8122. int func = BP_FUNC(bp);
  8123. u32 val, val2;
  8124. int rc = 0;
  8125. bnx2x_get_common_hwinfo(bp);
  8126. bp->e1hov = 0;
  8127. bp->e1hmf = 0;
  8128. if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
  8129. bp->mf_config =
  8130. SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
  8131. val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
  8132. FUNC_MF_CFG_E1HOV_TAG_MASK);
  8133. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
  8134. bp->e1hmf = 1;
  8135. BNX2X_DEV_INFO("%s function mode\n",
  8136. IS_E1HMF(bp) ? "multi" : "single");
  8137. if (IS_E1HMF(bp)) {
  8138. val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
  8139. e1hov_tag) &
  8140. FUNC_MF_CFG_E1HOV_TAG_MASK);
  8141. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
  8142. bp->e1hov = val;
  8143. BNX2X_DEV_INFO("E1HOV for func %d is %d "
  8144. "(0x%04x)\n",
  8145. func, bp->e1hov, bp->e1hov);
  8146. } else {
  8147. BNX2X_ERROR("No valid E1HOV for func %d,"
  8148. " aborting\n", func);
  8149. rc = -EPERM;
  8150. }
  8151. } else {
  8152. if (BP_E1HVN(bp)) {
  8153. BNX2X_ERROR("VN %d in single function mode,"
  8154. " aborting\n", BP_E1HVN(bp));
  8155. rc = -EPERM;
  8156. }
  8157. }
  8158. }
  8159. if (!BP_NOMCP(bp)) {
  8160. bnx2x_get_port_hwinfo(bp);
  8161. bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
  8162. DRV_MSG_SEQ_NUMBER_MASK);
  8163. BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
  8164. }
  8165. if (IS_E1HMF(bp)) {
  8166. val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
  8167. val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
  8168. if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
  8169. (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
  8170. bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
  8171. bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
  8172. bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
  8173. bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
  8174. bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
  8175. bp->dev->dev_addr[5] = (u8)(val & 0xff);
  8176. memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
  8177. ETH_ALEN);
  8178. memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
  8179. ETH_ALEN);
  8180. }
  8181. return rc;
  8182. }
  8183. if (BP_NOMCP(bp)) {
  8184. /* only supposed to happen on emulation/FPGA */
  8185. BNX2X_ERROR("warning: random MAC workaround active\n");
  8186. random_ether_addr(bp->dev->dev_addr);
  8187. memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  8188. }
  8189. return rc;
  8190. }
  8191. static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
  8192. {
  8193. int cnt, i, block_end, rodi;
  8194. char vpd_data[BNX2X_VPD_LEN+1];
  8195. char str_id_reg[VENDOR_ID_LEN+1];
  8196. char str_id_cap[VENDOR_ID_LEN+1];
  8197. u8 len;
  8198. cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
  8199. memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
  8200. if (cnt < BNX2X_VPD_LEN)
  8201. goto out_not_found;
  8202. i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
  8203. PCI_VPD_LRDT_RO_DATA);
  8204. if (i < 0)
  8205. goto out_not_found;
  8206. block_end = i + PCI_VPD_LRDT_TAG_SIZE +
  8207. pci_vpd_lrdt_size(&vpd_data[i]);
  8208. i += PCI_VPD_LRDT_TAG_SIZE;
  8209. if (block_end > BNX2X_VPD_LEN)
  8210. goto out_not_found;
  8211. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  8212. PCI_VPD_RO_KEYWORD_MFR_ID);
  8213. if (rodi < 0)
  8214. goto out_not_found;
  8215. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  8216. if (len != VENDOR_ID_LEN)
  8217. goto out_not_found;
  8218. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  8219. /* vendor specific info */
  8220. snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
  8221. snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
  8222. if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
  8223. !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
  8224. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  8225. PCI_VPD_RO_KEYWORD_VENDOR0);
  8226. if (rodi >= 0) {
  8227. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  8228. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  8229. if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
  8230. memcpy(bp->fw_ver, &vpd_data[rodi], len);
  8231. bp->fw_ver[len] = ' ';
  8232. }
  8233. }
  8234. return;
  8235. }
  8236. out_not_found:
  8237. return;
  8238. }
  8239. static int __devinit bnx2x_init_bp(struct bnx2x *bp)
  8240. {
  8241. int func = BP_FUNC(bp);
  8242. int timer_interval;
  8243. int rc;
  8244. /* Disable interrupt handling until HW is initialized */
  8245. atomic_set(&bp->intr_sem, 1);
  8246. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  8247. mutex_init(&bp->port.phy_mutex);
  8248. mutex_init(&bp->fw_mb_mutex);
  8249. spin_lock_init(&bp->stats_lock);
  8250. #ifdef BCM_CNIC
  8251. mutex_init(&bp->cnic_mutex);
  8252. #endif
  8253. INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
  8254. INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
  8255. rc = bnx2x_get_hwinfo(bp);
  8256. bnx2x_read_fwinfo(bp);
  8257. /* need to reset chip if undi was active */
  8258. if (!BP_NOMCP(bp))
  8259. bnx2x_undi_unload(bp);
  8260. if (CHIP_REV_IS_FPGA(bp))
  8261. dev_err(&bp->pdev->dev, "FPGA detected\n");
  8262. if (BP_NOMCP(bp) && (func == 0))
  8263. dev_err(&bp->pdev->dev, "MCP disabled, "
  8264. "must load devices in order!\n");
  8265. /* Set multi queue mode */
  8266. if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
  8267. ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
  8268. dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
  8269. "requested is not MSI-X\n");
  8270. multi_mode = ETH_RSS_MODE_DISABLED;
  8271. }
  8272. bp->multi_mode = multi_mode;
  8273. bp->dev->features |= NETIF_F_GRO;
  8274. /* Set TPA flags */
  8275. if (disable_tpa) {
  8276. bp->flags &= ~TPA_ENABLE_FLAG;
  8277. bp->dev->features &= ~NETIF_F_LRO;
  8278. } else {
  8279. bp->flags |= TPA_ENABLE_FLAG;
  8280. bp->dev->features |= NETIF_F_LRO;
  8281. }
  8282. if (CHIP_IS_E1(bp))
  8283. bp->dropless_fc = 0;
  8284. else
  8285. bp->dropless_fc = dropless_fc;
  8286. bp->mrrs = mrrs;
  8287. bp->tx_ring_size = MAX_TX_AVAIL;
  8288. bp->rx_ring_size = MAX_RX_AVAIL;
  8289. bp->rx_csum = 1;
  8290. /* make sure that the numbers are in the right granularity */
  8291. bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
  8292. bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
  8293. timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
  8294. bp->current_interval = (poll ? poll : timer_interval);
  8295. init_timer(&bp->timer);
  8296. bp->timer.expires = jiffies + bp->current_interval;
  8297. bp->timer.data = (unsigned long) bp;
  8298. bp->timer.function = bnx2x_timer;
  8299. return rc;
  8300. }
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

  8305. static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  8306. {
  8307. struct bnx2x *bp = netdev_priv(dev);
  8308. cmd->supported = bp->port.supported;
  8309. cmd->advertising = bp->port.advertising;
  8310. if ((bp->state == BNX2X_STATE_OPEN) &&
  8311. !(bp->flags & MF_FUNC_DIS) &&
  8312. (bp->link_vars.link_up)) {
  8313. cmd->speed = bp->link_vars.line_speed;
  8314. cmd->duplex = bp->link_vars.duplex;
  8315. if (IS_E1HMF(bp)) {
  8316. u16 vn_max_rate;
  8317. vn_max_rate =
  8318. ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
  8319. FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
  8320. if (vn_max_rate < cmd->speed)
  8321. cmd->speed = vn_max_rate;
  8322. }
  8323. } else {
  8324. cmd->speed = -1;
  8325. cmd->duplex = -1;
  8326. }
  8327. if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
  8328. u32 ext_phy_type =
  8329. XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
  8330. switch (ext_phy_type) {
  8331. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
  8332. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
  8333. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
  8334. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
  8335. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
  8336. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
  8337. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
  8338. cmd->port = PORT_FIBRE;
  8339. break;
  8340. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
  8341. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
  8342. cmd->port = PORT_TP;
  8343. break;
  8344. case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
  8345. BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
  8346. bp->link_params.ext_phy_config);
  8347. break;
  8348. default:
  8349. DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
  8350. bp->link_params.ext_phy_config);
  8351. break;
  8352. }
  8353. } else
  8354. cmd->port = PORT_TP;
  8355. cmd->phy_address = bp->mdio.prtad;
  8356. cmd->transceiver = XCVR_INTERNAL;
  8357. if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
  8358. cmd->autoneg = AUTONEG_ENABLE;
  8359. else
  8360. cmd->autoneg = AUTONEG_DISABLE;
  8361. cmd->maxtxpkt = 0;
  8362. cmd->maxrxpkt = 0;
  8363. DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
  8364. DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
  8365. DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
  8366. DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
  8367. cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
  8368. cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
  8369. cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  8370. return 0;
  8371. }
  8372. static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  8373. {
  8374. struct bnx2x *bp = netdev_priv(dev);
  8375. u32 advertising;
  8376. if (IS_E1HMF(bp))
  8377. return 0;
  8378. DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
  8379. DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
  8380. DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
  8381. DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
  8382. cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
  8383. cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
  8384. cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  8385. if (cmd->autoneg == AUTONEG_ENABLE) {
  8386. if (!(bp->port.supported & SUPPORTED_Autoneg)) {
  8387. DP(NETIF_MSG_LINK, "Autoneg not supported\n");
  8388. return -EINVAL;
  8389. }
  8390. /* advertise the requested speed and duplex if supported */
  8391. cmd->advertising &= bp->port.supported;
  8392. bp->link_params.req_line_speed = SPEED_AUTO_NEG;
  8393. bp->link_params.req_duplex = DUPLEX_FULL;
  8394. bp->port.advertising |= (ADVERTISED_Autoneg |
  8395. cmd->advertising);
  8396. } else { /* forced speed */
  8397. /* advertise the requested speed and duplex if supported */
  8398. switch (cmd->speed) {
  8399. case SPEED_10:
  8400. if (cmd->duplex == DUPLEX_FULL) {
  8401. if (!(bp->port.supported &
  8402. SUPPORTED_10baseT_Full)) {
  8403. DP(NETIF_MSG_LINK,
  8404. "10M full not supported\n");
  8405. return -EINVAL;
  8406. }
  8407. advertising = (ADVERTISED_10baseT_Full |
  8408. ADVERTISED_TP);
  8409. } else {
  8410. if (!(bp->port.supported &
  8411. SUPPORTED_10baseT_Half)) {
  8412. DP(NETIF_MSG_LINK,
  8413. "10M half not supported\n");
  8414. return -EINVAL;
  8415. }
  8416. advertising = (ADVERTISED_10baseT_Half |
  8417. ADVERTISED_TP);
  8418. }
  8419. break;
  8420. case SPEED_100:
  8421. if (cmd->duplex == DUPLEX_FULL) {
  8422. if (!(bp->port.supported &
  8423. SUPPORTED_100baseT_Full)) {
  8424. DP(NETIF_MSG_LINK,
  8425. "100M full not supported\n");
  8426. return -EINVAL;
  8427. }
  8428. advertising = (ADVERTISED_100baseT_Full |
  8429. ADVERTISED_TP);
  8430. } else {
  8431. if (!(bp->port.supported &
  8432. SUPPORTED_100baseT_Half)) {
  8433. DP(NETIF_MSG_LINK,
  8434. "100M half not supported\n");
  8435. return -EINVAL;
  8436. }
  8437. advertising = (ADVERTISED_100baseT_Half |
  8438. ADVERTISED_TP);
  8439. }
  8440. break;
  8441. case SPEED_1000:
  8442. if (cmd->duplex != DUPLEX_FULL) {
  8443. DP(NETIF_MSG_LINK, "1G half not supported\n");
  8444. return -EINVAL;
  8445. }
  8446. if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
  8447. DP(NETIF_MSG_LINK, "1G full not supported\n");
  8448. return -EINVAL;
  8449. }
  8450. advertising = (ADVERTISED_1000baseT_Full |
  8451. ADVERTISED_TP);
  8452. break;
  8453. case SPEED_2500:
  8454. if (cmd->duplex != DUPLEX_FULL) {
  8455. DP(NETIF_MSG_LINK,
  8456. "2.5G half not supported\n");
  8457. return -EINVAL;
  8458. }
  8459. if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
  8460. DP(NETIF_MSG_LINK,
  8461. "2.5G full not supported\n");
  8462. return -EINVAL;
  8463. }
  8464. advertising = (ADVERTISED_2500baseX_Full |
  8465. ADVERTISED_TP);
  8466. break;
  8467. case SPEED_10000:
  8468. if (cmd->duplex != DUPLEX_FULL) {
  8469. DP(NETIF_MSG_LINK, "10G half not supported\n");
  8470. return -EINVAL;
  8471. }
  8472. if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
  8473. DP(NETIF_MSG_LINK, "10G full not supported\n");
  8474. return -EINVAL;
  8475. }
  8476. advertising = (ADVERTISED_10000baseT_Full |
  8477. ADVERTISED_FIBRE);
  8478. break;
  8479. default:
  8480. DP(NETIF_MSG_LINK, "Unsupported speed\n");
  8481. return -EINVAL;
  8482. }
  8483. bp->link_params.req_line_speed = cmd->speed;
  8484. bp->link_params.req_duplex = cmd->duplex;
  8485. bp->port.advertising = advertising;
  8486. }
  8487. DP(NETIF_MSG_LINK, "req_line_speed %d\n"
  8488. DP_LEVEL " req_duplex %d advertising 0x%x\n",
  8489. bp->link_params.req_line_speed, bp->link_params.req_duplex,
  8490. bp->port.advertising);
  8491. if (netif_running(dev)) {
  8492. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  8493. bnx2x_link_set(bp);
  8494. }
  8495. return 0;
  8496. }
  8497. #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
  8498. #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
  8499. static int bnx2x_get_regs_len(struct net_device *dev)
  8500. {
  8501. struct bnx2x *bp = netdev_priv(dev);
  8502. int regdump_len = 0;
  8503. int i;
  8504. if (CHIP_IS_E1(bp)) {
  8505. for (i = 0; i < REGS_COUNT; i++)
  8506. if (IS_E1_ONLINE(reg_addrs[i].info))
  8507. regdump_len += reg_addrs[i].size;
  8508. for (i = 0; i < WREGS_COUNT_E1; i++)
  8509. if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
  8510. regdump_len += wreg_addrs_e1[i].size *
  8511. (1 + wreg_addrs_e1[i].read_regs_count);
  8512. } else { /* E1H */
  8513. for (i = 0; i < REGS_COUNT; i++)
  8514. if (IS_E1H_ONLINE(reg_addrs[i].info))
  8515. regdump_len += reg_addrs[i].size;
  8516. for (i = 0; i < WREGS_COUNT_E1H; i++)
  8517. if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
  8518. regdump_len += wreg_addrs_e1h[i].size *
  8519. (1 + wreg_addrs_e1h[i].read_regs_count);
  8520. }
  8521. regdump_len *= 4;
  8522. regdump_len += sizeof(struct dump_hdr);
  8523. return regdump_len;
  8524. }
  8525. static void bnx2x_get_regs(struct net_device *dev,
  8526. struct ethtool_regs *regs, void *_p)
  8527. {
  8528. u32 *p = _p, i, j;
  8529. struct bnx2x *bp = netdev_priv(dev);
  8530. struct dump_hdr dump_hdr = {0};
  8531. regs->version = 0;
  8532. memset(p, 0, regs->len);
  8533. if (!netif_running(bp->dev))
  8534. return;
  8535. dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
  8536. dump_hdr.dump_sign = dump_sign_all;
  8537. dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
  8538. dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
  8539. dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
  8540. dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
  8541. dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
  8542. memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
  8543. p += dump_hdr.hdr_size + 1;
  8544. if (CHIP_IS_E1(bp)) {
  8545. for (i = 0; i < REGS_COUNT; i++)
  8546. if (IS_E1_ONLINE(reg_addrs[i].info))
  8547. for (j = 0; j < reg_addrs[i].size; j++)
  8548. *p++ = REG_RD(bp,
  8549. reg_addrs[i].addr + j*4);
  8550. } else { /* E1H */
  8551. for (i = 0; i < REGS_COUNT; i++)
  8552. if (IS_E1H_ONLINE(reg_addrs[i].info))
  8553. for (j = 0; j < reg_addrs[i].size; j++)
  8554. *p++ = REG_RD(bp,
  8555. reg_addrs[i].addr + j*4);
  8556. }
  8557. }
  8558. #define PHY_FW_VER_LEN 10
  8559. static void bnx2x_get_drvinfo(struct net_device *dev,
  8560. struct ethtool_drvinfo *info)
  8561. {
  8562. struct bnx2x *bp = netdev_priv(dev);
  8563. u8 phy_fw_ver[PHY_FW_VER_LEN];
  8564. strcpy(info->driver, DRV_MODULE_NAME);
  8565. strcpy(info->version, DRV_MODULE_VERSION);
  8566. phy_fw_ver[0] = '\0';
  8567. if (bp->port.pmf) {
  8568. bnx2x_acquire_phy_lock(bp);
  8569. bnx2x_get_ext_phy_fw_version(&bp->link_params,
  8570. (bp->state != BNX2X_STATE_CLOSED),
  8571. phy_fw_ver, PHY_FW_VER_LEN);
  8572. bnx2x_release_phy_lock(bp);
  8573. }
  8574. strncpy(info->fw_version, bp->fw_ver, 32);
  8575. snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
  8576. "bc %d.%d.%d%s%s",
  8577. (bp->common.bc_ver & 0xff0000) >> 16,
  8578. (bp->common.bc_ver & 0xff00) >> 8,
  8579. (bp->common.bc_ver & 0xff),
  8580. ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
  8581. strcpy(info->bus_info, pci_name(bp->pdev));
  8582. info->n_stats = BNX2X_NUM_STATS;
  8583. info->testinfo_len = BNX2X_NUM_TESTS;
  8584. info->eedump_len = bp->common.flash_size;
  8585. info->regdump_len = bnx2x_get_regs_len(dev);
  8586. }
  8587. static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  8588. {
  8589. struct bnx2x *bp = netdev_priv(dev);
  8590. if (bp->flags & NO_WOL_FLAG) {
  8591. wol->supported = 0;
  8592. wol->wolopts = 0;
  8593. } else {
  8594. wol->supported = WAKE_MAGIC;
  8595. if (bp->wol)
  8596. wol->wolopts = WAKE_MAGIC;
  8597. else
  8598. wol->wolopts = 0;
  8599. }
  8600. memset(&wol->sopass, 0, sizeof(wol->sopass));
  8601. }
  8602. static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  8603. {
  8604. struct bnx2x *bp = netdev_priv(dev);
  8605. if (wol->wolopts & ~WAKE_MAGIC)
  8606. return -EINVAL;
  8607. if (wol->wolopts & WAKE_MAGIC) {
  8608. if (bp->flags & NO_WOL_FLAG)
  8609. return -EINVAL;
  8610. bp->wol = 1;
  8611. } else
  8612. bp->wol = 0;
  8613. return 0;
  8614. }
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

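/* NVRAM access is arbitrated between the ports through the MCPR_NVM_SW_ARB
 * register: request the per-port arbitration bit and poll until the HW
 * grants (or clears) it, with a longer timeout on emulation/FPGA.
 */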
  8649. static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
  8650. {
  8651. int port = BP_PORT(bp);
  8652. int count, i;
  8653. u32 val = 0;
  8654. /* adjust timeout for emulation/FPGA */
  8655. count = NVRAM_TIMEOUT_COUNT;
  8656. if (CHIP_REV_IS_SLOW(bp))
  8657. count *= 100;
  8658. /* request access to nvram interface */
  8659. REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
  8660. (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
  8661. for (i = 0; i < count*10; i++) {
  8662. val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
  8663. if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
  8664. break;
  8665. udelay(5);
  8666. }
  8667. if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
  8668. DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
  8669. return -EBUSY;
  8670. }
  8671. return 0;
  8672. }
  8673. static int bnx2x_release_nvram_lock(struct bnx2x *bp)
  8674. {
  8675. int port = BP_PORT(bp);
  8676. int count, i;
  8677. u32 val = 0;
  8678. /* adjust timeout for emulation/FPGA */
  8679. count = NVRAM_TIMEOUT_COUNT;
  8680. if (CHIP_REV_IS_SLOW(bp))
  8681. count *= 100;
  8682. /* relinquish nvram interface */
  8683. REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
  8684. (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
  8685. for (i = 0; i < count*10; i++) {
  8686. val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
  8687. if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
  8688. break;
  8689. udelay(5);
  8690. }
  8691. if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
  8692. DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
  8693. return -EBUSY;
  8694. }
  8695. return 0;
  8696. }
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

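/* Issue a single NVRAM read command: clear the DONE bit, program the
 * address, set DOIT and poll for completion.  The result is returned
 * big-endian so that ethtool sees the flash as a plain byte array.
 */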
  8715. static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
  8716. u32 cmd_flags)
  8717. {
  8718. int count, i, rc;
  8719. u32 val;
  8720. /* build the command word */
  8721. cmd_flags |= MCPR_NVM_COMMAND_DOIT;
  8722. /* need to clear DONE bit separately */
  8723. REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
  8724. /* address of the NVRAM to read from */
  8725. REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
  8726. (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
  8727. /* issue a read command */
  8728. REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
  8729. /* adjust timeout for emulation/FPGA */
  8730. count = NVRAM_TIMEOUT_COUNT;
  8731. if (CHIP_REV_IS_SLOW(bp))
  8732. count *= 100;
  8733. /* wait for completion */
  8734. *ret_val = 0;
  8735. rc = -EBUSY;
  8736. for (i = 0; i < count; i++) {
  8737. udelay(5);
  8738. val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
  8739. if (val & MCPR_NVM_COMMAND_DONE) {
  8740. val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
  8741. /* we read nvram data in cpu order
  8742. * but ethtool sees it as an array of bytes
  8743. * converting to big-endian will do the work */
  8744. *ret_val = cpu_to_be32(val);
  8745. rc = 0;
  8746. break;
  8747. }
  8748. }
  8749. return rc;
  8750. }
  8751. static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
  8752. int buf_size)
  8753. {
  8754. int rc;
  8755. u32 cmd_flags;
  8756. __be32 val;
  8757. if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
  8758. DP(BNX2X_MSG_NVM,
  8759. "Invalid parameter: offset 0x%x buf_size 0x%x\n",
  8760. offset, buf_size);
  8761. return -EINVAL;
  8762. }
  8763. if (offset + buf_size > bp->common.flash_size) {
  8764. DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
  8765. " buf_size (0x%x) > flash_size (0x%x)\n",
  8766. offset, buf_size, bp->common.flash_size);
  8767. return -EINVAL;
  8768. }
  8769. /* request access to nvram interface */
  8770. rc = bnx2x_acquire_nvram_lock(bp);
  8771. if (rc)
  8772. return rc;
  8773. /* enable access to nvram interface */
  8774. bnx2x_enable_nvram_access(bp);
  8775. /* read the first word(s) */
  8776. cmd_flags = MCPR_NVM_COMMAND_FIRST;
  8777. while ((buf_size > sizeof(u32)) && (rc == 0)) {
  8778. rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
  8779. memcpy(ret_buf, &val, 4);
  8780. /* advance to the next dword */
  8781. offset += sizeof(u32);
  8782. ret_buf += sizeof(u32);
  8783. buf_size -= sizeof(u32);
  8784. cmd_flags = 0;
  8785. }
  8786. if (rc == 0) {
  8787. cmd_flags |= MCPR_NVM_COMMAND_LAST;
  8788. rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
  8789. memcpy(ret_buf, &val, 4);
  8790. }
  8791. /* disable access to nvram interface */
  8792. bnx2x_disable_nvram_access(bp);
  8793. bnx2x_release_nvram_lock(bp);
  8794. return rc;
  8795. }
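/* ethtool "eeprom" read entry point (e.g. `ethtool -e <dev> offset N
 * length N` from user space); offset/length are validated by the
 * ethtool core before we get here.
 */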
  8796. static int bnx2x_get_eeprom(struct net_device *dev,
  8797. struct ethtool_eeprom *eeprom, u8 *eebuf)
  8798. {
  8799. struct bnx2x *bp = netdev_priv(dev);
  8800. int rc;
  8801. if (!netif_running(dev))
  8802. return -EAGAIN;
  8803. DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
  8804. DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
  8805. eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
  8806. eeprom->len, eeprom->len);
  8807. /* parameters already validated in ethtool_get_eeprom */
  8808. rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
  8809. return rc;
  8810. }
  8811. static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
  8812. u32 cmd_flags)
  8813. {
  8814. int count, i, rc;
  8815. /* build the command word */
  8816. cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
  8817. /* need to clear DONE bit separately */
  8818. REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
  8819. /* write the data */
  8820. REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
  8821. /* address of the NVRAM to write to */
  8822. REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
  8823. (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
  8824. /* issue the write command */
  8825. REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
  8826. /* adjust timeout for emulation/FPGA */
  8827. count = NVRAM_TIMEOUT_COUNT;
  8828. if (CHIP_REV_IS_SLOW(bp))
  8829. count *= 100;
  8830. /* wait for completion */
  8831. rc = -EBUSY;
  8832. for (i = 0; i < count; i++) {
  8833. udelay(5);
  8834. val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
  8835. if (val & MCPR_NVM_COMMAND_DONE) {
  8836. rc = 0;
  8837. break;
  8838. }
  8839. }
  8840. return rc;
  8841. }
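/* BYTE_OFFSET() converts a byte offset within a dword into a bit shift.
 * bnx2x_nvram_write1() uses it to implement a single-byte write as a
 * read-modify-write of the containing (dword-aligned) NVRAM word.
 */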
  8842. #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
  8843. static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
  8844. int buf_size)
  8845. {
  8846. int rc;
  8847. u32 cmd_flags;
  8848. u32 align_offset;
  8849. __be32 val;
  8850. if (offset + buf_size > bp->common.flash_size) {
  8851. DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
  8852. " buf_size (0x%x) > flash_size (0x%x)\n",
  8853. offset, buf_size, bp->common.flash_size);
  8854. return -EINVAL;
  8855. }
  8856. /* request access to nvram interface */
  8857. rc = bnx2x_acquire_nvram_lock(bp);
  8858. if (rc)
  8859. return rc;
  8860. /* enable access to nvram interface */
  8861. bnx2x_enable_nvram_access(bp);
  8862. cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
  8863. align_offset = (offset & ~0x03);
  8864. rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
  8865. if (rc == 0) {
  8866. val &= ~(0xff << BYTE_OFFSET(offset));
  8867. val |= (*data_buf << BYTE_OFFSET(offset));
8868. /* nvram data is returned as an array of bytes;
8869. * convert it back to cpu order */
  8870. val = be32_to_cpu(val);
  8871. rc = bnx2x_nvram_write_dword(bp, align_offset, val,
  8872. cmd_flags);
  8873. }
  8874. /* disable access to nvram interface */
  8875. bnx2x_disable_nvram_access(bp);
  8876. bnx2x_release_nvram_lock(bp);
  8877. return rc;
  8878. }
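/* Multi-byte writes are done dword by dword. The LAST flag is raised on
 * the final dword of the buffer or of an NVRAM page, and FIRST on the
 * first dword of each new page, so a single command sequence never
 * straddles a page boundary.
 */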
  8879. static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
  8880. int buf_size)
  8881. {
  8882. int rc;
  8883. u32 cmd_flags;
  8884. u32 val;
  8885. u32 written_so_far;
  8886. if (buf_size == 1) /* ethtool */
  8887. return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
  8888. if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
  8889. DP(BNX2X_MSG_NVM,
  8890. "Invalid parameter: offset 0x%x buf_size 0x%x\n",
  8891. offset, buf_size);
  8892. return -EINVAL;
  8893. }
  8894. if (offset + buf_size > bp->common.flash_size) {
  8895. DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
  8896. " buf_size (0x%x) > flash_size (0x%x)\n",
  8897. offset, buf_size, bp->common.flash_size);
  8898. return -EINVAL;
  8899. }
  8900. /* request access to nvram interface */
  8901. rc = bnx2x_acquire_nvram_lock(bp);
  8902. if (rc)
  8903. return rc;
  8904. /* enable access to nvram interface */
  8905. bnx2x_enable_nvram_access(bp);
  8906. written_so_far = 0;
  8907. cmd_flags = MCPR_NVM_COMMAND_FIRST;
  8908. while ((written_so_far < buf_size) && (rc == 0)) {
  8909. if (written_so_far == (buf_size - sizeof(u32)))
  8910. cmd_flags |= MCPR_NVM_COMMAND_LAST;
  8911. else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
  8912. cmd_flags |= MCPR_NVM_COMMAND_LAST;
  8913. else if ((offset % NVRAM_PAGE_SIZE) == 0)
  8914. cmd_flags |= MCPR_NVM_COMMAND_FIRST;
  8915. memcpy(&val, data_buf, 4);
  8916. rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
  8917. /* advance to the next dword */
  8918. offset += sizeof(u32);
  8919. data_buf += sizeof(u32);
  8920. written_so_far += sizeof(u32);
  8921. cmd_flags = 0;
  8922. }
  8923. /* disable access to nvram interface */
  8924. bnx2x_disable_nvram_access(bp);
  8925. bnx2x_release_nvram_lock(bp);
  8926. return rc;
  8927. }
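/* ethtool "eeprom" write entry point (`ethtool -E ... magic N ...`).
 * The 'PHY*' magic values are driver-private hooks used around an
 * SFX7101 PHY firmware upgrade; anything else is treated as a plain
 * NVRAM write.
 */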
  8928. static int bnx2x_set_eeprom(struct net_device *dev,
  8929. struct ethtool_eeprom *eeprom, u8 *eebuf)
  8930. {
  8931. struct bnx2x *bp = netdev_priv(dev);
  8932. int port = BP_PORT(bp);
  8933. int rc = 0;
  8934. if (!netif_running(dev))
  8935. return -EAGAIN;
  8936. DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
  8937. DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
  8938. eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
  8939. eeprom->len, eeprom->len);
  8940. /* parameters already validated in ethtool_set_eeprom */
  8941. /* PHY eeprom can be accessed only by the PMF */
  8942. if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
  8943. !bp->port.pmf)
  8944. return -EINVAL;
  8945. if (eeprom->magic == 0x50485950) {
  8946. /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
  8947. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  8948. bnx2x_acquire_phy_lock(bp);
  8949. rc |= bnx2x_link_reset(&bp->link_params,
  8950. &bp->link_vars, 0);
  8951. if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
  8952. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
  8953. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
  8954. MISC_REGISTERS_GPIO_HIGH, port);
  8955. bnx2x_release_phy_lock(bp);
  8956. bnx2x_link_report(bp);
  8957. } else if (eeprom->magic == 0x50485952) {
  8958. /* 'PHYR' (0x50485952): re-init link after FW upgrade */
  8959. if (bp->state == BNX2X_STATE_OPEN) {
  8960. bnx2x_acquire_phy_lock(bp);
  8961. rc |= bnx2x_link_reset(&bp->link_params,
  8962. &bp->link_vars, 1);
  8963. rc |= bnx2x_phy_init(&bp->link_params,
  8964. &bp->link_vars);
  8965. bnx2x_release_phy_lock(bp);
  8966. bnx2x_calc_fc_adv(bp);
  8967. }
  8968. } else if (eeprom->magic == 0x53985943) {
  8969. /* 'PHYC' (0x53985943): PHY FW upgrade completed */
  8970. if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
  8971. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
  8972. u8 ext_phy_addr =
  8973. XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
  8974. /* DSP Remove Download Mode */
  8975. bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
  8976. MISC_REGISTERS_GPIO_LOW, port);
  8977. bnx2x_acquire_phy_lock(bp);
  8978. bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
  8979. /* wait 0.5 sec to allow it to run */
  8980. msleep(500);
  8981. bnx2x_ext_phy_hw_reset(bp, port);
  8982. msleep(500);
  8983. bnx2x_release_phy_lock(bp);
  8984. }
  8985. } else
  8986. rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
  8987. return rc;
  8988. }
  8989. static int bnx2x_get_coalesce(struct net_device *dev,
  8990. struct ethtool_coalesce *coal)
  8991. {
  8992. struct bnx2x *bp = netdev_priv(dev);
  8993. memset(coal, 0, sizeof(struct ethtool_coalesce));
  8994. coal->rx_coalesce_usecs = bp->rx_ticks;
  8995. coal->tx_coalesce_usecs = bp->tx_ticks;
  8996. return 0;
  8997. }
  8998. static int bnx2x_set_coalesce(struct net_device *dev,
  8999. struct ethtool_coalesce *coal)
  9000. {
  9001. struct bnx2x *bp = netdev_priv(dev);
  9002. bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
  9003. if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
  9004. bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
  9005. bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
  9006. if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
  9007. bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
  9008. if (netif_running(dev))
  9009. bnx2x_update_coalesce(bp);
  9010. return 0;
  9011. }
  9012. static void bnx2x_get_ringparam(struct net_device *dev,
  9013. struct ethtool_ringparam *ering)
  9014. {
  9015. struct bnx2x *bp = netdev_priv(dev);
  9016. ering->rx_max_pending = MAX_RX_AVAIL;
  9017. ering->rx_mini_max_pending = 0;
  9018. ering->rx_jumbo_max_pending = 0;
  9019. ering->rx_pending = bp->rx_ring_size;
  9020. ering->rx_mini_pending = 0;
  9021. ering->rx_jumbo_pending = 0;
  9022. ering->tx_max_pending = MAX_TX_AVAIL;
  9023. ering->tx_pending = bp->tx_ring_size;
  9024. }
  9025. static int bnx2x_set_ringparam(struct net_device *dev,
  9026. struct ethtool_ringparam *ering)
  9027. {
  9028. struct bnx2x *bp = netdev_priv(dev);
  9029. int rc = 0;
  9030. if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
  9031. printk(KERN_ERR "Handling parity error recovery. Try again later\n");
  9032. return -EAGAIN;
  9033. }
  9034. if ((ering->rx_pending > MAX_RX_AVAIL) ||
  9035. (ering->tx_pending > MAX_TX_AVAIL) ||
  9036. (ering->tx_pending <= MAX_SKB_FRAGS + 4))
  9037. return -EINVAL;
  9038. bp->rx_ring_size = ering->rx_pending;
  9039. bp->tx_ring_size = ering->tx_pending;
  9040. if (netif_running(dev)) {
  9041. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  9042. rc = bnx2x_nic_load(bp, LOAD_NORMAL);
  9043. }
  9044. return rc;
  9045. }
  9046. static void bnx2x_get_pauseparam(struct net_device *dev,
  9047. struct ethtool_pauseparam *epause)
  9048. {
  9049. struct bnx2x *bp = netdev_priv(dev);
  9050. epause->autoneg = (bp->link_params.req_flow_ctrl ==
  9051. BNX2X_FLOW_CTRL_AUTO) &&
  9052. (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
  9053. epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
  9054. BNX2X_FLOW_CTRL_RX);
  9055. epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
  9056. BNX2X_FLOW_CTRL_TX);
  9057. DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
  9058. DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
  9059. epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
  9060. }
  9061. static int bnx2x_set_pauseparam(struct net_device *dev,
  9062. struct ethtool_pauseparam *epause)
  9063. {
  9064. struct bnx2x *bp = netdev_priv(dev);
  9065. if (IS_E1HMF(bp))
  9066. return 0;
  9067. DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
  9068. DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
  9069. epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
  9070. bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
  9071. if (epause->rx_pause)
  9072. bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
  9073. if (epause->tx_pause)
  9074. bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
  9075. if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
  9076. bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
  9077. if (epause->autoneg) {
  9078. if (!(bp->port.supported & SUPPORTED_Autoneg)) {
  9079. DP(NETIF_MSG_LINK, "autoneg not supported\n");
  9080. return -EINVAL;
  9081. }
  9082. if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
  9083. bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
  9084. }
  9085. DP(NETIF_MSG_LINK,
  9086. "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
  9087. if (netif_running(dev)) {
  9088. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  9089. bnx2x_link_set(bp);
  9090. }
  9091. return 0;
  9092. }
  9093. static int bnx2x_set_flags(struct net_device *dev, u32 data)
  9094. {
  9095. struct bnx2x *bp = netdev_priv(dev);
  9096. int changed = 0;
  9097. int rc = 0;
  9098. if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
  9099. printk(KERN_ERR "Handling parity error recovery. Try again later\n");
  9100. return -EAGAIN;
  9101. }
  9102. /* TPA requires Rx CSUM offloading */
  9103. if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
  9104. if (!disable_tpa) {
  9105. if (!(dev->features & NETIF_F_LRO)) {
  9106. dev->features |= NETIF_F_LRO;
  9107. bp->flags |= TPA_ENABLE_FLAG;
  9108. changed = 1;
  9109. }
  9110. } else
  9111. rc = -EINVAL;
  9112. } else if (dev->features & NETIF_F_LRO) {
  9113. dev->features &= ~NETIF_F_LRO;
  9114. bp->flags &= ~TPA_ENABLE_FLAG;
  9115. changed = 1;
  9116. }
  9117. if (data & ETH_FLAG_RXHASH)
  9118. dev->features |= NETIF_F_RXHASH;
  9119. else
  9120. dev->features &= ~NETIF_F_RXHASH;
  9121. if (changed && netif_running(dev)) {
  9122. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  9123. rc = bnx2x_nic_load(bp, LOAD_NORMAL);
  9124. }
  9125. return rc;
  9126. }
  9127. static u32 bnx2x_get_rx_csum(struct net_device *dev)
  9128. {
  9129. struct bnx2x *bp = netdev_priv(dev);
  9130. return bp->rx_csum;
  9131. }
  9132. static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
  9133. {
  9134. struct bnx2x *bp = netdev_priv(dev);
  9135. int rc = 0;
  9136. if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
  9137. printk(KERN_ERR "Handling parity error recovery. Try again later\n");
  9138. return -EAGAIN;
  9139. }
  9140. bp->rx_csum = data;
9141. /* Disable TPA when Rx CSUM is disabled; otherwise all
9142. TPA'ed packets will be discarded due to a wrong TCP CSUM */
  9143. if (!data) {
  9144. u32 flags = ethtool_op_get_flags(dev);
  9145. rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
  9146. }
  9147. return rc;
  9148. }
  9149. static int bnx2x_set_tso(struct net_device *dev, u32 data)
  9150. {
  9151. if (data) {
  9152. dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
  9153. dev->features |= NETIF_F_TSO6;
  9154. } else {
  9155. dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
  9156. dev->features &= ~NETIF_F_TSO6;
  9157. }
  9158. return 0;
  9159. }
  9160. static const struct {
  9161. char string[ETH_GSTRING_LEN];
  9162. } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
  9163. { "register_test (offline)" },
  9164. { "memory_test (offline)" },
  9165. { "loopback_test (offline)" },
  9166. { "nvram_test (online)" },
  9167. { "interrupt_test (online)" },
  9168. { "link_test (online)" },
  9169. { "idle check (online)" }
  9170. };
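/* Register self-test: each entry in reg_tbl below is written with
 * 0x00000000 and then 0xffffffff (masked), read back and compared, and
 * finally restored. offset1 is the per-port stride added to offset0 for
 * port 1.
 */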
  9171. static int bnx2x_test_registers(struct bnx2x *bp)
  9172. {
  9173. int idx, i, rc = -ENODEV;
  9174. u32 wr_val = 0;
  9175. int port = BP_PORT(bp);
  9176. static const struct {
  9177. u32 offset0;
  9178. u32 offset1;
  9179. u32 mask;
  9180. } reg_tbl[] = {
  9181. /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
  9182. { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
  9183. { HC_REG_AGG_INT_0, 4, 0x000003ff },
  9184. { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
  9185. { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
  9186. { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
  9187. { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
  9188. { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
  9189. { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
  9190. { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
  9191. /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
  9192. { QM_REG_CONNNUM_0, 4, 0x000fffff },
  9193. { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
  9194. { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
  9195. { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
  9196. { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
  9197. { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
  9198. { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
  9199. { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
  9200. { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
  9201. /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
  9202. { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
  9203. { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
  9204. { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
  9205. { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
  9206. { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
  9207. { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
  9208. { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
  9209. { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
  9210. { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
  9211. /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
  9212. { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
  9213. { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
  9214. { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
  9215. { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
  9216. { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
  9217. { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
  9218. { 0xffffffff, 0, 0x00000000 }
  9219. };
  9220. if (!netif_running(bp->dev))
  9221. return rc;
  9222. /* Repeat the test twice:
  9223. First by writing 0x00000000, second by writing 0xffffffff */
  9224. for (idx = 0; idx < 2; idx++) {
  9225. switch (idx) {
  9226. case 0:
  9227. wr_val = 0;
  9228. break;
  9229. case 1:
  9230. wr_val = 0xffffffff;
  9231. break;
  9232. }
  9233. for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
  9234. u32 offset, mask, save_val, val;
  9235. offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
  9236. mask = reg_tbl[i].mask;
  9237. save_val = REG_RD(bp, offset);
  9238. REG_WR(bp, offset, (wr_val & mask));
  9239. val = REG_RD(bp, offset);
  9240. /* Restore the original register's value */
  9241. REG_WR(bp, offset, save_val);
  9242. /* verify value is as expected */
  9243. if ((val & mask) != (wr_val & mask)) {
  9244. DP(NETIF_MSG_PROBE,
  9245. "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
  9246. offset, val, wr_val, mask);
  9247. goto test_reg_exit;
  9248. }
  9249. }
  9250. }
  9251. rc = 0;
  9252. test_reg_exit:
  9253. return rc;
  9254. }
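/* Memory self-test: walk the internal memories with dummy reads and then
 * verify that the per-block parity status registers show no unexpected
 * bits (the acceptable masks differ between E1 and E1H).
 */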
  9255. static int bnx2x_test_memory(struct bnx2x *bp)
  9256. {
  9257. int i, j, rc = -ENODEV;
  9258. u32 val;
  9259. static const struct {
  9260. u32 offset;
  9261. int size;
  9262. } mem_tbl[] = {
  9263. { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
  9264. { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
  9265. { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
  9266. { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
  9267. { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
  9268. { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
  9269. { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
  9270. { 0xffffffff, 0 }
  9271. };
  9272. static const struct {
  9273. char *name;
  9274. u32 offset;
  9275. u32 e1_mask;
  9276. u32 e1h_mask;
  9277. } prty_tbl[] = {
  9278. { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
  9279. { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
  9280. { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
  9281. { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
  9282. { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
  9283. { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
  9284. { NULL, 0xffffffff, 0, 0 }
  9285. };
  9286. if (!netif_running(bp->dev))
  9287. return rc;
  9288. /* Go through all the memories */
  9289. for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
  9290. for (j = 0; j < mem_tbl[i].size; j++)
  9291. REG_RD(bp, mem_tbl[i].offset + j*4);
  9292. /* Check the parity status */
  9293. for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
  9294. val = REG_RD(bp, prty_tbl[i].offset);
  9295. if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
  9296. (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
  9297. DP(NETIF_MSG_HW,
  9298. "%s is 0x%x\n", prty_tbl[i].name, val);
  9299. goto test_mem_exit;
  9300. }
  9301. }
  9302. rc = 0;
  9303. test_mem_exit:
  9304. return rc;
  9305. }
  9306. static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
  9307. {
  9308. int cnt = 1000;
  9309. if (link_up)
  9310. while (bnx2x_link_test(bp) && cnt--)
  9311. msleep(10);
  9312. }
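/* Loopback self-test: build one self-addressed frame, post it on queue 0
 * with a start BD and a parse BD, ring the doorbell, and then check that
 * exactly one packet completes on both the Tx and Rx rings with an
 * intact payload.
 */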
  9313. static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
  9314. {
  9315. unsigned int pkt_size, num_pkts, i;
  9316. struct sk_buff *skb;
  9317. unsigned char *packet;
  9318. struct bnx2x_fastpath *fp_rx = &bp->fp[0];
  9319. struct bnx2x_fastpath *fp_tx = &bp->fp[0];
  9320. u16 tx_start_idx, tx_idx;
  9321. u16 rx_start_idx, rx_idx;
  9322. u16 pkt_prod, bd_prod;
  9323. struct sw_tx_bd *tx_buf;
  9324. struct eth_tx_start_bd *tx_start_bd;
  9325. struct eth_tx_parse_bd *pbd = NULL;
  9326. dma_addr_t mapping;
  9327. union eth_rx_cqe *cqe;
  9328. u8 cqe_fp_flags;
  9329. struct sw_rx_bd *rx_buf;
  9330. u16 len;
  9331. int rc = -ENODEV;
  9332. /* check the loopback mode */
  9333. switch (loopback_mode) {
  9334. case BNX2X_PHY_LOOPBACK:
  9335. if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
  9336. return -EINVAL;
  9337. break;
  9338. case BNX2X_MAC_LOOPBACK:
  9339. bp->link_params.loopback_mode = LOOPBACK_BMAC;
  9340. bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  9341. break;
  9342. default:
  9343. return -EINVAL;
  9344. }
  9345. /* prepare the loopback packet */
  9346. pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
  9347. bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
  9348. skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
  9349. if (!skb) {
  9350. rc = -ENOMEM;
  9351. goto test_loopback_exit;
  9352. }
  9353. packet = skb_put(skb, pkt_size);
  9354. memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
  9355. memset(packet + ETH_ALEN, 0, ETH_ALEN);
  9356. memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
  9357. for (i = ETH_HLEN; i < pkt_size; i++)
  9358. packet[i] = (unsigned char) (i & 0xff);
  9359. /* send the loopback packet */
  9360. num_pkts = 0;
  9361. tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
  9362. rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
  9363. pkt_prod = fp_tx->tx_pkt_prod++;
  9364. tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
  9365. tx_buf->first_bd = fp_tx->tx_bd_prod;
  9366. tx_buf->skb = skb;
  9367. tx_buf->flags = 0;
  9368. bd_prod = TX_BD(fp_tx->tx_bd_prod);
  9369. tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
  9370. mapping = dma_map_single(&bp->pdev->dev, skb->data,
  9371. skb_headlen(skb), DMA_TO_DEVICE);
  9372. tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  9373. tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  9374. tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
  9375. tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
  9376. tx_start_bd->vlan = cpu_to_le16(pkt_prod);
  9377. tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
  9378. tx_start_bd->general_data = ((UNICAST_ADDRESS <<
  9379. ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
  9380. /* turn on parsing and get a BD */
  9381. bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  9382. pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
  9383. memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
  9384. wmb();
  9385. fp_tx->tx_db.data.prod += 2;
  9386. barrier();
  9387. DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
  9388. mmiowb();
  9389. num_pkts++;
  9390. fp_tx->tx_bd_prod += 2; /* start + pbd */
  9391. udelay(100);
  9392. tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
  9393. if (tx_idx != tx_start_idx + num_pkts)
  9394. goto test_loopback_exit;
  9395. rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
  9396. if (rx_idx != rx_start_idx + num_pkts)
  9397. goto test_loopback_exit;
  9398. cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
  9399. cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
  9400. if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
  9401. goto test_loopback_rx_exit;
  9402. len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
  9403. if (len != pkt_size)
  9404. goto test_loopback_rx_exit;
  9405. rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
  9406. skb = rx_buf->skb;
  9407. skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
  9408. for (i = ETH_HLEN; i < pkt_size; i++)
  9409. if (*(skb->data + i) != (unsigned char) (i & 0xff))
  9410. goto test_loopback_rx_exit;
  9411. rc = 0;
  9412. test_loopback_rx_exit:
  9413. fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
  9414. fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
  9415. fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
  9416. fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
  9417. /* Update producers */
  9418. bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
  9419. fp_rx->rx_sge_prod);
  9420. test_loopback_exit:
  9421. bp->link_params.loopback_mode = LOOPBACK_NONE;
  9422. return rc;
  9423. }
  9424. static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
  9425. {
  9426. int rc = 0, res;
  9427. if (BP_NOMCP(bp))
  9428. return rc;
  9429. if (!netif_running(bp->dev))
  9430. return BNX2X_LOOPBACK_FAILED;
  9431. bnx2x_netif_stop(bp, 1);
  9432. bnx2x_acquire_phy_lock(bp);
  9433. res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
  9434. if (res) {
  9435. DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
  9436. rc |= BNX2X_PHY_LOOPBACK_FAILED;
  9437. }
  9438. res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
  9439. if (res) {
  9440. DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
  9441. rc |= BNX2X_MAC_LOOPBACK_FAILED;
  9442. }
  9443. bnx2x_release_phy_lock(bp);
  9444. bnx2x_netif_start(bp);
  9445. return rc;
  9446. }
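/* The NVRAM regions checked below include their own CRC32, so running
 * ether_crc_le() over a whole region (data plus CRC) must yield the
 * well-known CRC-32 residual value when the contents are intact.
 */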
  9447. #define CRC32_RESIDUAL 0xdebb20e3
  9448. static int bnx2x_test_nvram(struct bnx2x *bp)
  9449. {
  9450. static const struct {
  9451. int offset;
  9452. int size;
  9453. } nvram_tbl[] = {
  9454. { 0, 0x14 }, /* bootstrap */
  9455. { 0x14, 0xec }, /* dir */
  9456. { 0x100, 0x350 }, /* manuf_info */
  9457. { 0x450, 0xf0 }, /* feature_info */
  9458. { 0x640, 0x64 }, /* upgrade_key_info */
  9459. { 0x6a4, 0x64 },
  9460. { 0x708, 0x70 }, /* manuf_key_info */
  9461. { 0x778, 0x70 },
  9462. { 0, 0 }
  9463. };
  9464. __be32 buf[0x350 / 4];
  9465. u8 *data = (u8 *)buf;
  9466. int i, rc;
  9467. u32 magic, crc;
  9468. if (BP_NOMCP(bp))
  9469. return 0;
  9470. rc = bnx2x_nvram_read(bp, 0, data, 4);
  9471. if (rc) {
  9472. DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
  9473. goto test_nvram_exit;
  9474. }
  9475. magic = be32_to_cpu(buf[0]);
  9476. if (magic != 0x669955aa) {
  9477. DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
  9478. rc = -ENODEV;
  9479. goto test_nvram_exit;
  9480. }
  9481. for (i = 0; nvram_tbl[i].size; i++) {
  9482. rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
  9483. nvram_tbl[i].size);
  9484. if (rc) {
  9485. DP(NETIF_MSG_PROBE,
  9486. "nvram_tbl[%d] read data (rc %d)\n", i, rc);
  9487. goto test_nvram_exit;
  9488. }
  9489. crc = ether_crc_le(nvram_tbl[i].size, data);
  9490. if (crc != CRC32_RESIDUAL) {
  9491. DP(NETIF_MSG_PROBE,
  9492. "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
  9493. rc = -ENODEV;
  9494. goto test_nvram_exit;
  9495. }
  9496. }
  9497. test_nvram_exit:
  9498. return rc;
  9499. }
  9500. static int bnx2x_test_intr(struct bnx2x *bp)
  9501. {
  9502. struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
  9503. int i, rc;
  9504. if (!netif_running(bp->dev))
  9505. return -ENODEV;
  9506. config->hdr.length = 0;
  9507. if (CHIP_IS_E1(bp))
  9508. /* use last unicast entries */
  9509. config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
  9510. else
  9511. config->hdr.offset = BP_FUNC(bp);
  9512. config->hdr.client_id = bp->fp->cl_id;
  9513. config->hdr.reserved1 = 0;
  9514. bp->set_mac_pending++;
  9515. smp_wmb();
  9516. rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
  9517. U64_HI(bnx2x_sp_mapping(bp, mac_config)),
  9518. U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
  9519. if (rc == 0) {
  9520. for (i = 0; i < 10; i++) {
  9521. if (!bp->set_mac_pending)
  9522. break;
  9523. smp_rmb();
  9524. msleep_interruptible(10);
  9525. }
  9526. if (i == 10)
  9527. rc = -ENODEV;
  9528. }
  9529. return rc;
  9530. }
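/* ethtool self-test dispatcher. Offline tests (registers, memory,
 * loopback -> buf[0..2]) reload the NIC in diagnostic mode and are not
 * run in E1H multi-function mode; online tests (nvram, interrupt,
 * link -> buf[3..5]) run without reloading the NIC.
 */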
  9531. static void bnx2x_self_test(struct net_device *dev,
  9532. struct ethtool_test *etest, u64 *buf)
  9533. {
  9534. struct bnx2x *bp = netdev_priv(dev);
  9535. if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
  9536. printk(KERN_ERR "Handling parity error recovery. Try again later\n");
  9537. etest->flags |= ETH_TEST_FL_FAILED;
  9538. return;
  9539. }
  9540. memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
  9541. if (!netif_running(dev))
  9542. return;
  9543. /* offline tests are not supported in MF mode */
  9544. if (IS_E1HMF(bp))
  9545. etest->flags &= ~ETH_TEST_FL_OFFLINE;
  9546. if (etest->flags & ETH_TEST_FL_OFFLINE) {
  9547. int port = BP_PORT(bp);
  9548. u32 val;
  9549. u8 link_up;
  9550. /* save current value of input enable for TX port IF */
  9551. val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
  9552. /* disable input for TX port IF */
  9553. REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
  9554. link_up = (bnx2x_link_test(bp) == 0);
  9555. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  9556. bnx2x_nic_load(bp, LOAD_DIAG);
  9557. /* wait until link state is restored */
  9558. bnx2x_wait_for_link(bp, link_up);
  9559. if (bnx2x_test_registers(bp) != 0) {
  9560. buf[0] = 1;
  9561. etest->flags |= ETH_TEST_FL_FAILED;
  9562. }
  9563. if (bnx2x_test_memory(bp) != 0) {
  9564. buf[1] = 1;
  9565. etest->flags |= ETH_TEST_FL_FAILED;
  9566. }
  9567. buf[2] = bnx2x_test_loopback(bp, link_up);
  9568. if (buf[2] != 0)
  9569. etest->flags |= ETH_TEST_FL_FAILED;
  9570. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  9571. /* restore input for TX port IF */
  9572. REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
  9573. bnx2x_nic_load(bp, LOAD_NORMAL);
  9574. /* wait until link state is restored */
  9575. bnx2x_wait_for_link(bp, link_up);
  9576. }
  9577. if (bnx2x_test_nvram(bp) != 0) {
  9578. buf[3] = 1;
  9579. etest->flags |= ETH_TEST_FL_FAILED;
  9580. }
  9581. if (bnx2x_test_intr(bp) != 0) {
  9582. buf[4] = 1;
  9583. etest->flags |= ETH_TEST_FL_FAILED;
  9584. }
  9585. if (bp->port.pmf)
  9586. if (bnx2x_link_test(bp) != 0) {
  9587. buf[5] = 1;
  9588. etest->flags |= ETH_TEST_FL_FAILED;
  9589. }
  9590. #ifdef BNX2X_EXTRA_DEBUG
  9591. bnx2x_panic_dump(bp);
  9592. #endif
  9593. }
  9594. static const struct {
  9595. long offset;
  9596. int size;
  9597. u8 string[ETH_GSTRING_LEN];
  9598. } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
  9599. /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
  9600. { Q_STATS_OFFSET32(error_bytes_received_hi),
  9601. 8, "[%d]: rx_error_bytes" },
  9602. { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
  9603. 8, "[%d]: rx_ucast_packets" },
  9604. { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
  9605. 8, "[%d]: rx_mcast_packets" },
  9606. { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
  9607. 8, "[%d]: rx_bcast_packets" },
  9608. { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
  9609. { Q_STATS_OFFSET32(rx_err_discard_pkt),
  9610. 4, "[%d]: rx_phy_ip_err_discards"},
  9611. { Q_STATS_OFFSET32(rx_skb_alloc_failed),
  9612. 4, "[%d]: rx_skb_alloc_discard" },
  9613. { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
  9614. /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
  9615. { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
  9616. 8, "[%d]: tx_ucast_packets" },
  9617. { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
  9618. 8, "[%d]: tx_mcast_packets" },
  9619. { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
  9620. 8, "[%d]: tx_bcast_packets" }
  9621. };
  9622. static const struct {
  9623. long offset;
  9624. int size;
  9625. u32 flags;
  9626. #define STATS_FLAGS_PORT 1
  9627. #define STATS_FLAGS_FUNC 2
  9628. #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
  9629. u8 string[ETH_GSTRING_LEN];
  9630. } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
  9631. /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
  9632. 8, STATS_FLAGS_BOTH, "rx_bytes" },
  9633. { STATS_OFFSET32(error_bytes_received_hi),
  9634. 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
  9635. { STATS_OFFSET32(total_unicast_packets_received_hi),
  9636. 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
  9637. { STATS_OFFSET32(total_multicast_packets_received_hi),
  9638. 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
  9639. { STATS_OFFSET32(total_broadcast_packets_received_hi),
  9640. 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
  9641. { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
  9642. 8, STATS_FLAGS_PORT, "rx_crc_errors" },
  9643. { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
  9644. 8, STATS_FLAGS_PORT, "rx_align_errors" },
  9645. { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
  9646. 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
  9647. { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
  9648. 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
  9649. /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
  9650. 8, STATS_FLAGS_PORT, "rx_fragments" },
  9651. { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
  9652. 8, STATS_FLAGS_PORT, "rx_jabbers" },
  9653. { STATS_OFFSET32(no_buff_discard_hi),
  9654. 8, STATS_FLAGS_BOTH, "rx_discards" },
  9655. { STATS_OFFSET32(mac_filter_discard),
  9656. 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
  9657. { STATS_OFFSET32(xxoverflow_discard),
  9658. 4, STATS_FLAGS_PORT, "rx_fw_discards" },
  9659. { STATS_OFFSET32(brb_drop_hi),
  9660. 8, STATS_FLAGS_PORT, "rx_brb_discard" },
  9661. { STATS_OFFSET32(brb_truncate_hi),
  9662. 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
  9663. { STATS_OFFSET32(pause_frames_received_hi),
  9664. 8, STATS_FLAGS_PORT, "rx_pause_frames" },
  9665. { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
  9666. 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
  9667. { STATS_OFFSET32(nig_timer_max),
  9668. 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
  9669. /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
  9670. 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
  9671. { STATS_OFFSET32(rx_skb_alloc_failed),
  9672. 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
  9673. { STATS_OFFSET32(hw_csum_err),
  9674. 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
  9675. { STATS_OFFSET32(total_bytes_transmitted_hi),
  9676. 8, STATS_FLAGS_BOTH, "tx_bytes" },
  9677. { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
  9678. 8, STATS_FLAGS_PORT, "tx_error_bytes" },
  9679. { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
  9680. 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
  9681. { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
  9682. 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
  9683. { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
  9684. 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
  9685. { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
  9686. 8, STATS_FLAGS_PORT, "tx_mac_errors" },
  9687. { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
  9688. 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
  9689. /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
  9690. 8, STATS_FLAGS_PORT, "tx_single_collisions" },
  9691. { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
  9692. 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
  9693. { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
  9694. 8, STATS_FLAGS_PORT, "tx_deferred" },
  9695. { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
  9696. 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
  9697. { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
  9698. 8, STATS_FLAGS_PORT, "tx_late_collisions" },
  9699. { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
  9700. 8, STATS_FLAGS_PORT, "tx_total_collisions" },
  9701. { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
  9702. 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
  9703. { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
  9704. 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
  9705. { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
  9706. 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
  9707. { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
  9708. 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
  9709. /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
  9710. 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
  9711. { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
  9712. 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
  9713. { STATS_OFFSET32(etherstatspktsover1522octets_hi),
  9714. 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
  9715. { STATS_OFFSET32(pause_frames_sent_hi),
  9716. 8, STATS_FLAGS_PORT, "tx_pause_frames" }
  9717. };
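/* A statistic flagged as port-only is hidden in E1H multi-function mode
 * (unless BNX2X_MSG_STATS is enabled), since port counters are shared by
 * all functions on the port; function statistics are always exported.
 */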
  9718. #define IS_PORT_STAT(i) \
  9719. ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
  9720. #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
  9721. #define IS_E1HMF_MODE_STAT(bp) \
  9722. (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
  9723. static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
  9724. {
  9725. struct bnx2x *bp = netdev_priv(dev);
  9726. int i, num_stats;
  9727. switch (stringset) {
  9728. case ETH_SS_STATS:
  9729. if (is_multi(bp)) {
  9730. num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
  9731. if (!IS_E1HMF_MODE_STAT(bp))
  9732. num_stats += BNX2X_NUM_STATS;
  9733. } else {
  9734. if (IS_E1HMF_MODE_STAT(bp)) {
  9735. num_stats = 0;
  9736. for (i = 0; i < BNX2X_NUM_STATS; i++)
  9737. if (IS_FUNC_STAT(i))
  9738. num_stats++;
  9739. } else
  9740. num_stats = BNX2X_NUM_STATS;
  9741. }
  9742. return num_stats;
  9743. case ETH_SS_TEST:
  9744. return BNX2X_NUM_TESTS;
  9745. default:
  9746. return -EINVAL;
  9747. }
  9748. }
  9749. static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  9750. {
  9751. struct bnx2x *bp = netdev_priv(dev);
  9752. int i, j, k;
  9753. switch (stringset) {
  9754. case ETH_SS_STATS:
  9755. if (is_multi(bp)) {
  9756. k = 0;
  9757. for_each_queue(bp, i) {
  9758. for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
  9759. sprintf(buf + (k + j)*ETH_GSTRING_LEN,
  9760. bnx2x_q_stats_arr[j].string, i);
  9761. k += BNX2X_NUM_Q_STATS;
  9762. }
  9763. if (IS_E1HMF_MODE_STAT(bp))
  9764. break;
  9765. for (j = 0; j < BNX2X_NUM_STATS; j++)
  9766. strcpy(buf + (k + j)*ETH_GSTRING_LEN,
  9767. bnx2x_stats_arr[j].string);
  9768. } else {
  9769. for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
  9770. if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
  9771. continue;
  9772. strcpy(buf + j*ETH_GSTRING_LEN,
  9773. bnx2x_stats_arr[i].string);
  9774. j++;
  9775. }
  9776. }
  9777. break;
  9778. case ETH_SS_TEST:
  9779. memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
  9780. break;
  9781. }
  9782. }
  9783. static void bnx2x_get_ethtool_stats(struct net_device *dev,
  9784. struct ethtool_stats *stats, u64 *buf)
  9785. {
  9786. struct bnx2x *bp = netdev_priv(dev);
  9787. u32 *hw_stats, *offset;
  9788. int i, j, k;
  9789. if (is_multi(bp)) {
  9790. k = 0;
  9791. for_each_queue(bp, i) {
  9792. hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
  9793. for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
  9794. if (bnx2x_q_stats_arr[j].size == 0) {
  9795. /* skip this counter */
  9796. buf[k + j] = 0;
  9797. continue;
  9798. }
  9799. offset = (hw_stats +
  9800. bnx2x_q_stats_arr[j].offset);
  9801. if (bnx2x_q_stats_arr[j].size == 4) {
  9802. /* 4-byte counter */
  9803. buf[k + j] = (u64) *offset;
  9804. continue;
  9805. }
  9806. /* 8-byte counter */
  9807. buf[k + j] = HILO_U64(*offset, *(offset + 1));
  9808. }
  9809. k += BNX2X_NUM_Q_STATS;
  9810. }
  9811. if (IS_E1HMF_MODE_STAT(bp))
  9812. return;
  9813. hw_stats = (u32 *)&bp->eth_stats;
  9814. for (j = 0; j < BNX2X_NUM_STATS; j++) {
  9815. if (bnx2x_stats_arr[j].size == 0) {
  9816. /* skip this counter */
  9817. buf[k + j] = 0;
  9818. continue;
  9819. }
  9820. offset = (hw_stats + bnx2x_stats_arr[j].offset);
  9821. if (bnx2x_stats_arr[j].size == 4) {
  9822. /* 4-byte counter */
  9823. buf[k + j] = (u64) *offset;
  9824. continue;
  9825. }
  9826. /* 8-byte counter */
  9827. buf[k + j] = HILO_U64(*offset, *(offset + 1));
  9828. }
  9829. } else {
  9830. hw_stats = (u32 *)&bp->eth_stats;
  9831. for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
  9832. if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
  9833. continue;
  9834. if (bnx2x_stats_arr[i].size == 0) {
  9835. /* skip this counter */
  9836. buf[j] = 0;
  9837. j++;
  9838. continue;
  9839. }
  9840. offset = (hw_stats + bnx2x_stats_arr[i].offset);
  9841. if (bnx2x_stats_arr[i].size == 4) {
  9842. /* 4-byte counter */
  9843. buf[j] = (u64) *offset;
  9844. j++;
  9845. continue;
  9846. }
  9847. /* 8-byte counter */
  9848. buf[j] = HILO_U64(*offset, *(offset + 1));
  9849. j++;
  9850. }
  9851. }
  9852. }
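/* ethtool port-identify (`ethtool -p <dev> N`): toggle the port LED
 * between the 1G-operational pattern and off every 500 ms for N seconds
 * (2 seconds if N is 0), then restore the LED to match the current link
 * state.
 */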
  9853. static int bnx2x_phys_id(struct net_device *dev, u32 data)
  9854. {
  9855. struct bnx2x *bp = netdev_priv(dev);
  9856. int i;
  9857. if (!netif_running(dev))
  9858. return 0;
  9859. if (!bp->port.pmf)
  9860. return 0;
  9861. if (data == 0)
  9862. data = 2;
  9863. for (i = 0; i < (data * 2); i++) {
  9864. if ((i % 2) == 0)
  9865. bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
  9866. SPEED_1000);
  9867. else
  9868. bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
  9869. msleep_interruptible(500);
  9870. if (signal_pending(current))
  9871. break;
  9872. }
  9873. if (bp->link_vars.link_up)
  9874. bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
  9875. bp->link_vars.line_speed);
  9876. return 0;
  9877. }
  9878. static const struct ethtool_ops bnx2x_ethtool_ops = {
  9879. .get_settings = bnx2x_get_settings,
  9880. .set_settings = bnx2x_set_settings,
  9881. .get_drvinfo = bnx2x_get_drvinfo,
  9882. .get_regs_len = bnx2x_get_regs_len,
  9883. .get_regs = bnx2x_get_regs,
  9884. .get_wol = bnx2x_get_wol,
  9885. .set_wol = bnx2x_set_wol,
  9886. .get_msglevel = bnx2x_get_msglevel,
  9887. .set_msglevel = bnx2x_set_msglevel,
  9888. .nway_reset = bnx2x_nway_reset,
  9889. .get_link = bnx2x_get_link,
  9890. .get_eeprom_len = bnx2x_get_eeprom_len,
  9891. .get_eeprom = bnx2x_get_eeprom,
  9892. .set_eeprom = bnx2x_set_eeprom,
  9893. .get_coalesce = bnx2x_get_coalesce,
  9894. .set_coalesce = bnx2x_set_coalesce,
  9895. .get_ringparam = bnx2x_get_ringparam,
  9896. .set_ringparam = bnx2x_set_ringparam,
  9897. .get_pauseparam = bnx2x_get_pauseparam,
  9898. .set_pauseparam = bnx2x_set_pauseparam,
  9899. .get_rx_csum = bnx2x_get_rx_csum,
  9900. .set_rx_csum = bnx2x_set_rx_csum,
  9901. .get_tx_csum = ethtool_op_get_tx_csum,
  9902. .set_tx_csum = ethtool_op_set_tx_hw_csum,
  9903. .set_flags = bnx2x_set_flags,
  9904. .get_flags = ethtool_op_get_flags,
  9905. .get_sg = ethtool_op_get_sg,
  9906. .set_sg = ethtool_op_set_sg,
  9907. .get_tso = ethtool_op_get_tso,
  9908. .set_tso = bnx2x_set_tso,
  9909. .self_test = bnx2x_self_test,
  9910. .get_sset_count = bnx2x_get_sset_count,
  9911. .get_strings = bnx2x_get_strings,
  9912. .phys_id = bnx2x_phys_id,
  9913. .get_ethtool_stats = bnx2x_get_ethtool_stats,
  9914. };
  9915. /* end of ethtool_ops */
  9916. /****************************************************************************
  9917. * General service functions
  9918. ****************************************************************************/
  9919. static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
  9920. {
  9921. u16 pmcsr;
  9922. pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
  9923. switch (state) {
  9924. case PCI_D0:
  9925. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  9926. ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
  9927. PCI_PM_CTRL_PME_STATUS));
  9928. if (pmcsr & PCI_PM_CTRL_STATE_MASK)
  9929. /* delay required during transition out of D3hot */
  9930. msleep(20);
  9931. break;
  9932. case PCI_D3hot:
9933. /* If there are other clients above, don't
9934. shut down the power */
  9935. if (atomic_read(&bp->pdev->enable_cnt) != 1)
  9936. return 0;
  9937. /* Don't shut down the power for emulation and FPGA */
  9938. if (CHIP_REV_IS_SLOW(bp))
  9939. return 0;
  9940. pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
  9941. pmcsr |= 3;
  9942. if (bp->wol)
  9943. pmcsr |= PCI_PM_CTRL_PME_ENABLE;
  9944. pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
  9945. pmcsr);
  9946. /* No more memory access after this point until
  9947. * device is brought back to D0.
  9948. */
  9949. break;
  9950. default:
  9951. return -EINVAL;
  9952. }
  9953. return 0;
  9954. }
  9955. static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
  9956. {
  9957. u16 rx_cons_sb;
  9958. /* Tell compiler that status block fields can change */
  9959. barrier();
  9960. rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
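/* if the status-block consumer lands on the last slot of an RCQ page
 * (reserved for the next-page element), step over it so it can be
 * compared with the software completion consumer */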
  9961. if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
  9962. rx_cons_sb++;
  9963. return (fp->rx_comp_cons != rx_cons_sb);
  9964. }
  9965. /*
  9966. * net_device service functions
  9967. */
  9968. static int bnx2x_poll(struct napi_struct *napi, int budget)
  9969. {
  9970. int work_done = 0;
  9971. struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
  9972. napi);
  9973. struct bnx2x *bp = fp->bp;
  9974. while (1) {
  9975. #ifdef BNX2X_STOP_ON_ERROR
  9976. if (unlikely(bp->panic)) {
  9977. napi_complete(napi);
  9978. return 0;
  9979. }
  9980. #endif
  9981. if (bnx2x_has_tx_work(fp))
  9982. bnx2x_tx_int(fp);
  9983. if (bnx2x_has_rx_work(fp)) {
  9984. work_done += bnx2x_rx_int(fp, budget - work_done);
  9985. /* must not complete if we consumed full budget */
  9986. if (work_done >= budget)
  9987. break;
  9988. }
  9989. /* Fall out from the NAPI loop if needed */
  9990. if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
  9991. bnx2x_update_fpsb_idx(fp);
  9992. /* bnx2x_has_rx_work() reads the status block, thus we need
  9993. * to ensure that status block indices have been actually read
  9994. * (bnx2x_update_fpsb_idx) prior to this check
  9995. * (bnx2x_has_rx_work) so that we won't write the "newer"
  9996. * value of the status block to IGU (if there was a DMA right
  9997. * after bnx2x_has_rx_work and if there is no rmb, the memory
  9998. * reading (bnx2x_update_fpsb_idx) may be postponed to right
  9999. * before bnx2x_ack_sb). In this case there will never be
  10000. * another interrupt until there is another update of the
  10001. * status block, while there is still unhandled work.
  10002. */
  10003. rmb();
  10004. if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
  10005. napi_complete(napi);
  10006. /* Re-enable interrupts */
  10007. bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
  10008. le16_to_cpu(fp->fp_c_idx),
  10009. IGU_INT_NOP, 1);
  10010. bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
  10011. le16_to_cpu(fp->fp_u_idx),
  10012. IGU_INT_ENABLE, 1);
  10013. break;
  10014. }
  10015. }
  10016. }
  10017. return work_done;
  10018. }
10019. /* We split the first BD into a header BD and a data BD
10020. * to ease the pain of our fellow microcode engineers;
10021. * we use one mapping for both BDs.
10022. * So far this has only been observed to happen
10023. * in Other Operating Systems(TM).
10024. */
  10025. static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
  10026. struct bnx2x_fastpath *fp,
  10027. struct sw_tx_bd *tx_buf,
  10028. struct eth_tx_start_bd **tx_bd, u16 hlen,
  10029. u16 bd_prod, int nbd)
  10030. {
  10031. struct eth_tx_start_bd *h_tx_bd = *tx_bd;
  10032. struct eth_tx_bd *d_tx_bd;
  10033. dma_addr_t mapping;
  10034. int old_len = le16_to_cpu(h_tx_bd->nbytes);
  10035. /* first fix first BD */
  10036. h_tx_bd->nbd = cpu_to_le16(nbd);
  10037. h_tx_bd->nbytes = cpu_to_le16(hlen);
  10038. DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
  10039. "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
  10040. h_tx_bd->addr_lo, h_tx_bd->nbd);
  10041. /* now get a new data BD
  10042. * (after the pbd) and fill it */
  10043. bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  10044. d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
  10045. mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
  10046. le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
  10047. d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  10048. d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  10049. d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
  10050. /* this marks the BD as one that has no individual mapping */
  10051. tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
  10052. DP(NETIF_MSG_TX_QUEUED,
  10053. "TSO split data size is %d (%x:%x)\n",
  10054. d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
  10055. /* update tx_bd */
  10056. *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
  10057. return bd_prod;
  10058. }
  10059. static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
  10060. {
  10061. if (fix > 0)
  10062. csum = (u16) ~csum_fold(csum_sub(csum,
  10063. csum_partial(t_header - fix, fix, 0)));
  10064. else if (fix < 0)
  10065. csum = (u16) ~csum_fold(csum_add(csum,
  10066. csum_partial(t_header, -fix, 0)));
  10067. return swab16(csum);
  10068. }
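/* Derive the XMIT_* flag word for a packet: plain, IPv4/IPv6 checksum
 * offload, TCP checksum, and/or GSO (TSO) - used below to decide how the
 * start and parse BDs are filled in.
 */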
  10069. static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
  10070. {
  10071. u32 rc;
  10072. if (skb->ip_summed != CHECKSUM_PARTIAL)
  10073. rc = XMIT_PLAIN;
  10074. else {
  10075. if (skb->protocol == htons(ETH_P_IPV6)) {
  10076. rc = XMIT_CSUM_V6;
  10077. if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
  10078. rc |= XMIT_CSUM_TCP;
  10079. } else {
  10080. rc = XMIT_CSUM_V4;
  10081. if (ip_hdr(skb)->protocol == IPPROTO_TCP)
  10082. rc |= XMIT_CSUM_TCP;
  10083. }
  10084. }
  10085. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  10086. rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
  10087. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  10088. rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
  10089. return rc;
  10090. }
  10091. #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10092. /* check if the packet requires linearization (packet is too fragmented);
10093. no need to check fragmentation if page size > 8K (there will be no
10094. violation of FW restrictions) */
  10095. static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
  10096. u32 xmit_type)
  10097. {
  10098. int to_copy = 0;
  10099. int hlen = 0;
  10100. int first_bd_sz = 0;
  10101. /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
  10102. if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
  10103. if (xmit_type & XMIT_GSO) {
  10104. unsigned short lso_mss = skb_shinfo(skb)->gso_size;
  10105. /* Check if LSO packet needs to be copied:
  10106. 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
  10107. int wnd_size = MAX_FETCH_BD - 3;
  10108. /* Number of windows to check */
  10109. int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
  10110. int wnd_idx = 0;
  10111. int frag_idx = 0;
  10112. u32 wnd_sum = 0;
  10113. /* Headers length */
  10114. hlen = (int)(skb_transport_header(skb) - skb->data) +
  10115. tcp_hdrlen(skb);
10116. /* Amount of data (w/o headers) on the linear part of the SKB */
  10117. first_bd_sz = skb_headlen(skb) - hlen;
  10118. wnd_sum = first_bd_sz;
  10119. /* Calculate the first sum - it's special */
  10120. for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
  10121. wnd_sum +=
  10122. skb_shinfo(skb)->frags[frag_idx].size;
10123. /* If there was data in the linear part of the skb - check it */
  10124. if (first_bd_sz > 0) {
  10125. if (unlikely(wnd_sum < lso_mss)) {
  10126. to_copy = 1;
  10127. goto exit_lbl;
  10128. }
  10129. wnd_sum -= first_bd_sz;
  10130. }
  10131. /* Others are easier: run through the frag list and
  10132. check all windows */
  10133. for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
  10134. wnd_sum +=
  10135. skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
  10136. if (unlikely(wnd_sum < lso_mss)) {
  10137. to_copy = 1;
  10138. break;
  10139. }
  10140. wnd_sum -=
  10141. skb_shinfo(skb)->frags[wnd_idx].size;
  10142. }
  10143. } else {
10144. /* in the non-LSO case, a too-fragmented packet should always
10145. be linearized */
  10146. to_copy = 1;
  10147. }
  10148. }
  10149. exit_lbl:
  10150. if (unlikely(to_copy))
  10151. DP(NETIF_MSG_TX_QUEUED,
  10152. "Linearization IS REQUIRED for %s packet. "
  10153. "num_frags %d hlen %d first_bd_sz %d\n",
  10154. (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
  10155. skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
  10156. return to_copy;
  10157. }
  10158. #endif
  10159. /* called with netif_tx_lock
  10160. * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  10161. * netif_wake_queue()
  10162. */
  10163. static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
  10164. {
  10165. struct bnx2x *bp = netdev_priv(dev);
  10166. struct bnx2x_fastpath *fp;
  10167. struct netdev_queue *txq;
  10168. struct sw_tx_bd *tx_buf;
  10169. struct eth_tx_start_bd *tx_start_bd;
  10170. struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
  10171. struct eth_tx_parse_bd *pbd = NULL;
  10172. u16 pkt_prod, bd_prod;
  10173. int nbd, fp_index;
  10174. dma_addr_t mapping;
  10175. u32 xmit_type = bnx2x_xmit_type(bp, skb);
  10176. int i;
  10177. u8 hlen = 0;
  10178. __le16 pkt_size = 0;
  10179. struct ethhdr *eth;
  10180. u8 mac_type = UNICAST_ADDRESS;
  10181. #ifdef BNX2X_STOP_ON_ERROR
  10182. if (unlikely(bp->panic))
  10183. return NETDEV_TX_BUSY;
  10184. #endif
  10185. fp_index = skb_get_queue_mapping(skb);
  10186. txq = netdev_get_tx_queue(dev, fp_index);
  10187. fp = &bp->fp[fp_index];
  10188. if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
  10189. fp->eth_q_stats.driver_xoff++;
  10190. netif_tx_stop_queue(txq);
  10191. BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
  10192. return NETDEV_TX_BUSY;
  10193. }
  10194. DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
  10195. " gso type %x xmit_type %x\n",
  10196. skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
  10197. ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
  10198. eth = (struct ethhdr *)skb->data;
  10199. /* set flag according to packet type (UNICAST_ADDRESS is default)*/
  10200. if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
  10201. if (is_broadcast_ether_addr(eth->h_dest))
  10202. mac_type = BROADCAST_ADDRESS;
  10203. else
  10204. mac_type = MULTICAST_ADDRESS;
  10205. }
  10206. #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
  10207. /* First, check if we need to linearize the skb (due to FW
  10208. restrictions). No need to check fragmentation if page size > 8K
10209. (there will be no violation of FW restrictions) */
  10210. if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
  10211. /* Statistics of linearization */
  10212. bp->lin_cnt++;
  10213. if (skb_linearize(skb) != 0) {
  10214. DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
  10215. "silently dropping this SKB\n");
  10216. dev_kfree_skb_any(skb);
  10217. return NETDEV_TX_OK;
  10218. }
  10219. }
  10220. #endif
  10221. /*
  10222. Please read carefully. First we use one BD which we mark as start,
  10223. then we have a parsing info BD (used for TSO or xsum),
  10224. and only then we have the rest of the TSO BDs.
  10225. (don't forget to mark the last one as last,
  10226. and to unmap only AFTER you write to the BD ...)
10227. And above all, all pbd sizes are in words - NOT DWORDS!
  10228. */
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset-done
			 * is still not cleared, we don't check the attention
			 * state here because it may have already been cleared
			 * by a "common" reset, but we shall proceed with
			 * "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
					    config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
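/* net_device watchdog callback: optionally panic when BNX2X_STOP_ON_ERROR is
 * set, then schedule the reset task so the NIC is brought down and back up
 * outside of the timeout context.
 */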
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
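/* netpoll handler: run the interrupt handler with the device IRQ disabled so
 * the stack can poll the NIC when normal interrupt delivery is unavailable.
 */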
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
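/* One-time PCI/netdev setup for a newly probed device: enable the PCI
 * function, validate and map BAR0 (registers) and BAR2 (doorbells), choose a
 * 64- or 32-bit DMA mask, clean the PXP2 indirect address registers and fill
 * in the netdev ops, features and MDIO accessors.
 */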
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
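/* Report the negotiated PCIe link width and speed by decoding the PCICFG
 * link control/status value.
 */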
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
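/* Sanity-check a loaded firmware image: every section and every init_ops
 * offset must lie within the file, and the embedded FW version must match
 * the version this driver was built against.
 */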
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;

	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
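/* Helpers that copy big-endian arrays from the firmware image into
 * CPU-endian buffers used by the init code.
 */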
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
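/* Allocate bp->arr and fill it from the named firmware section, converting
 * endianness with the supplied helper; jump to the given error label on
 * allocation failure.
 */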
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
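/* Request the chip-specific firmware file, validate it, and set up the
 * in-memory init arrays (blob, opcodes, offsets) and the per-STORM data and
 * program pointers used later by chip initialization.
 */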
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
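/* Minimal unload used on the PCI error (EEH) path: stop the data path and
 * timers, release IRQs, invalidate the E1 multicast CAM and free all driver
 * memory without touching the failed hardware more than necessary.
 */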
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
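/* Re-read the shared memory base after a slot reset and recover the MCP
 * state (validity signature and firmware sequence number) so a subsequent
 * load can talk to the management firmware again.
 */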
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
				& DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
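/* Queue 16-byte kernel work queue entries from CNIC onto the driver's
 * software KWQ ring and kick the slow path if there is room on the SPQ.
 */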
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
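/* Publish the IRQ resources CNIC should use: the second MSI-X vector when
 * MSI-X is active, plus the CNIC and default status blocks.
 */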
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
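/* Register a CNIC instance with this device: allocate the software KWQ ring,
 * initialize the CNIC status block and IRQ info, program the iSCSI MAC and
 * finally publish the ops pointer for the fast path to use.
 */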
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
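/* Exported probe hook for the cnic module: fill in the cnic_eth_dev
 * descriptor (register/doorbell mappings, context table geometry and the
 * driver callbacks) that CNIC uses to drive this device.
 */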
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */