/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF, check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};
static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
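
/*
 * Worked example (derived from the constants above): with a 16 ns cycle
 * time and a scale factor of 2^19, the base TIMINCA value is
 * 16 << 19 = 0x800000 (2^23), which just fits the 24-bit limit that the
 * #error check above enforces.
 */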
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;
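
	/* assemble the 64-bit SYSTIM cycle counter, low dword first */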
	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;

	getnstimeofday(&sys);
	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);
	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
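/*
 * Example mapping (follows from the macro): Q_IDX_82576(0) = 0,
 * Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...
 * so consecutive software queues alternate between the low (0-7) and
 * high (8-15) blocks of ring registers.
 */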
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
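		/*
		 * Byte layout of an IVAR0 entry, as encoded below: Rx queues
		 * 0-7 land in byte 0 and Tx queues 0-7 in byte 1 of entry
		 * (queue & 0x7); Rx queues 8-15 land in byte 2 and Tx queues
		 * 8-15 in byte 3 of the same entry.
		 */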
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  &igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
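
	/*
	 * Example (follows from the counting above): 4 Rx and 4 Tx queues
	 * on a machine with at least 4 online CPUs give
	 * numvecs = 4 + 4 + 1 = 9 MSI-X vectors.
	 */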
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
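	/* unwind in reverse order, releasing any vectors already allocated */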
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;
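
	/*
	 * If there are enough vectors, give every ring its own vector;
	 * otherwise pair each Tx ring with an Rx ring on a shared vector
	 * until the Tx rings run out.
	 */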
	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;
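
	/* try MSI-X first, fall back to MSI, and finally to legacy INTx */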
	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;
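
		/* vector 0 is the "other"/link-status interrupt; the
		 * per-ring vectors follow it */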
		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
		                 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
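
/* keep the VLAN id referenced by the firmware management cookie
 * registered with the stack, releasing the old id once it is no
 * longer in use */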
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
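	/* Serialize resets: only one path may run igb_down()/igb_up() at
	 * a time; any other caller sleeps here until the bit clears.
	 */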
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition the PBA for a greater-than-9k MTU.
	 * CTRL.RST is required for the change to take effect.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits have the Tx packet buffer allocation in KB */
		tx_space = pba >> 16;
		/* lower 16 bits have the Rx packet buffer allocation in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * packet, but we don't include the Ethernet FCS because the
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips the receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
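	/* For example, assuming a 34 KB Rx PBA and a 1522 byte max frame:
	 * 90% of 34816 bytes is 31334, and 34816 - 2 * 1522 is 31772,
	 * so hwm would come out as 31334 here.
	 */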
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;

		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
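	/* Prefer a 64-bit DMA mask so descriptors and buffers can sit
	 * anywhere in physical memory; fall back to 32-bit addressing
	 * (and skip NETIF_F_HIGHDMA below) when the platform cannot
	 * honor it.
	 */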
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev,
							   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		/* 82576 supports a maximum of 7 VFs in addition to the PF */
		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
		int i;
		unsigned char mac_addr[ETH_ALEN];

		if (num_vfs) {
			adapter->vf_data = kcalloc(num_vfs,
						   sizeof(struct vf_data_storage),
						   GFP_KERNEL);
			if (!adapter->vf_data) {
				dev_err(&pdev->dev,
					"Could not allocate VF private data - "
					"IOV enable failed\n");
			} else {
				err = pci_enable_sriov(pdev, num_vfs);
				if (!err) {
					adapter->vfs_allocated_count = num_vfs;
					dev_info(&pdev->dev,
						 "%d vfs allocated\n",
						 num_vfs);
					for (i = 0;
					     i < adapter->vfs_allocated_count;
					     i++) {
						random_ether_addr(mac_addr);
						igb_set_vf_mac(adapter, i,
							       mac_addr);
					}
				} else {
					kfree(adapter->vf_data);
					adapter->vf_data = NULL;
				}
			}
		}
	}
#endif

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->hw.mac.type == e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting.  If APM wake is enabled in the
	 * EEPROM, enable the ACPI Magic Packet filter.
	 */
	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}
#endif

	/*
	 * Initialize the hardware timer: we keep it running just in case
	 * some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1 << 24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif
	wrfl();
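
	/*
	 * timecounter_init() latches the current SYSTIM value against
	 * wall-clock time; from here on the timecounter converts the
	 * free-running cycle count to nanoseconds using the cyclecounter
	 * mult/shift configured above.
	 */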
	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));
	/*
	 * Synchronize our NIC clock against the system wall clock.  NIC
	 * time stamp reading requires ~3us per sample, and each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
		       "igb: %s: hw %p initialized timer\n",
		       igb_get_time_str(adapter, buffer),
		       &adapter->hw);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		  "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable the watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
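	/* max_frame_size is the MTU plus the 14 byte Ethernet header plus
	 * the 4 byte FCS; min_frame_size is the 60 byte minimum frame
	 * (ETH_ZLEN) plus FCS, i.e. 64 bytes on the wire.
	 */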

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}
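
	/* Map every possible stack queue index onto an allocated ring;
	 * with fewer rings than IGB_MAX_TX_QUEUES, indices simply wrap
	 * round-robin onto the rings that do exist.
	 */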
	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8 bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->num_rx_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}
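
	/* The redirection table is 128 one-byte entries, packed four per
	 * 32-bit RETA register; each entry picks the queue (shifted into
	 * the position the silicon expects) for one hash bucket.
	 */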
	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->num_rx_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);

		vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 1)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
	}

	wr32(E1000_RCTL, rctl);
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);
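
	/* Buffers smaller than 1K use header split: the protocol headers
	 * land in the small buffer and the payload in a half-page
	 * fragment, while larger buffers use the simple one-buffer
	 * advanced descriptor layout.
	 */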
	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
			 E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(tx_ring->pdev, tx_ring->size,
			    tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}

void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&tx_ring->pdev->dev,
			      buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, tx_ring->head);
	writel(0, tx_ring->tail);
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(rx_ring->pdev, rx_ring->size,
			    rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_single(rx_ring->pdev,
					 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			pci_unmap_page(rx_ring->pdev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr = netdev->mc_list;
	u8 *mta_list;
	u32 vmolr = 0;
	int i;

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* set vmolr receive overflow multicast bit */
	vmolr |= E1000_VMOLR_ROMPE;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev->mc_count;
}

/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;
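
	/* One RAR entry is consumed by the PF default MAC (entry 0) and
	 * one by each VF, so only the remainder of the RAR table is
	 * available here; the caller falls back to unicast promiscuous
	 * mode when the list does not fit.
	 */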
	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev->uc.count > rar_entries)
		return -ENOMEM;

	if (netdev->uc.count && rar_entries) {
		struct netdev_hw_addr *ha;

		list_for_each_entry(ha, &netdev->uc.list, list) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}

	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
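
/*
 * A condensed model of the policy implemented above (illustration only):
 * given the interface flags and the results of programming the MTA and
 * RAR tables, decide which RCTL promiscuous bits are needed.  The names
 * here are hypothetical, not part of the driver.
 */
#if 0	/* illustrative sketch only */
static void example_rx_policy(unsigned int flags, int mc_ret, int uc_ret,
			      bool *uc_promisc, bool *mc_promisc)
{
	if (flags & IFF_PROMISC) {
		*uc_promisc = *mc_promisc = true;
	} else {
		/* all-multi requested, or MTA programming failed (-ENOMEM) */
		*mc_promisc = (flags & IFF_ALLMULTI) || (mc_ret < 0);
		/* more unicast addresses than free RAR entries */
		*uc_promisc = (uc_ret < 0);
	}
}
#endif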
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status messages must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status messages must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
		                      (ring->total_bytes /
		                       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
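
/*
 * Worked example of the sizing heuristic above, assuming the ~256 ns ITR
 * granularity implied by the "976 <-> 4000 ints/sec" relationship:
 *
 *   576-byte frames: avg_wire_size = 576 + 24 = 600, which lands in the
 *   mid-size boost range (300..1200), so new_val = 600 / 3 = 200, i.e.
 *   roughly 51 us between interrupts (~19,500 ints/sec).
 *
 *   1500-byte frames: avg_wire_size = 1524, outside the boost range, so
 *   new_val = 1524 / 2 = 762, i.e. ~195 us (~5,100 ints/sec).
 */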
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
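
/*
 * Sample transitions through the table above (illustration only):
 *
 *   low_latency,  packets = 40, bytes = 12000: bytes > 10000, the ratio
 *       is ~300 B/packet and packets > 35   -> lowest_latency
 *   low_latency,  packets = 4,  bytes = 9000:  bytes/packets = 2250,
 *       which exceeds 2000                  -> bulk_latency
 *   bulk_latency, packets = 40, bytes = 26000: bytes > 25000 and
 *       packets > 35                        -> low_latency
 */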
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
	                                 adapter->rx_itr,
	                                 adapter->rx_ring->total_packets,
	                                 adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
	                                 adapter->tx_itr,
	                                 adapter->tx_ring->total_packets,
	                                 adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		          max((new_itr * q_vector->itr_val) /
		              (new_itr + (q_vector->itr_val >> 2)),
		              new_itr) :
		          new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16

static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
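
/*
 * Illustration of the MSS/L4LEN packing above, assuming the conventional
 * field positions implied by E1000_ADVTXD_MSS_SHIFT (16) and
 * E1000_ADVTXD_L4LEN_SHIFT (8): for gso_size = 1448 and a 20-byte TCP
 * header, mss_l4len_idx = (1448 << 16) | (20 << 8) = 0x05A81400, before
 * any 82575 context-index bits are OR'd in.
 */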
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				        (const struct vlan_ethhdr *)skb->data;
				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					         "partial checksum but proto=%x!\n",
					         skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count + 1;
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
	                 IGB_TX_FLAGS_TSO |
	                 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
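
/*
 * Worked example for olinfo_status above, assuming PAYLEN sits at
 * E1000_ADVTXD_PAYLEN_SHIFT (14 in the igb headers): for a 64 KB TSO
 * send with skb->len = 65590 and hdr_len = 54, the descriptors carry
 * paylen = 65536 in the PAYLEN field, plus TXSM (and IXSM for IPv4)
 * so the hardware inserts the checksums in every resulting segment.
 */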
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
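
/*
 * The stop/recheck/wake sequence above is the standard lockless pattern
 * for multiqueue drivers: stop first, then re-test after the memory
 * barrier so a concurrent completion cannot be missed.  A minimal sketch
 * of the consumer side that pairs with it (the real counterpart lives in
 * igb_clean_tx_irq):
 */
#if 0	/* illustrative sketch only */
	smp_mb();	/* pairs with the smp_mb() in __igb_maybe_stop_tx */
	if (unlikely(__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE &&
	             !test_bit(__IGB_DOWN, &adapter->state)))
		netif_wake_subqueue(netdev, tx_ring->queue_index);
#endif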
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	union skb_shared_tx *shtx;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/*
	 * TODO: check that there currently is no other packet with
	 * time stamping in the queue
	 *
	 * When doing time stamping, keep the connection to the socket
	 * a while longer: it is still needed by skb_hwtstamp_tx(),
	 * called either in igb_tx_hwtstamp() or by our caller when
	 * doing software time stamping.
	 */
	shtx = skb_tx(skb);
	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
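
/*
 * Example of the "+ 4" headroom used above: a TSO skb with 3 page
 * fragments needs 1 context descriptor, 1 descriptor for skb->data,
 * 3 for the fragments, and 2 descriptors of slack so the tail never
 * touches the head -- nr_frags + 4 = 7 free entries before committing.
 */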
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */
	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
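
/*
 * Example of the buffer sizing above, assuming the usual constant values
 * (IGB_RXBUFFER_1024 = 1024, MAXIMUM_ETHERNET_VLAN_SIZE = 1522):
 *
 *   MTU 1000 -> max_frame 1018 -> 1024-byte buffers
 *   MTU 1500 -> max_frame 1518 -> 1522-byte buffers
 *   MTU 9000 -> max_frame 9018 -> 128-byte header buffers, with the
 *               rest of each jumbo frame landing in paged receive memory
 */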
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */
	if (hw->mac.type != e1000_82575) {
		u32 rqdpc_tmp;
		u64 rqdpc_total = 0;
		int i;
		/* Read out drop stats per RX queue.  Note that RQDPC (Receive
		 * Queue Drop Packet Count) is only incremented if the DROP_EN
		 * bit is set (in the SRRCTL register for that queue).  If
		 * DROP_EN is NOT set, a somewhat equivalent count is stored
		 * in RNBC (not on a per-queue basis).  Also note that the
		 * drop count is due to a lack of available descriptors.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
		}
		netdev->stats.rx_fifo_errors = rqdpc_total;
	}

	/* Note that RNBC (Receive No Buffers Count) is not an exact drop
	 * count, as the hardware FIFO might save the day.  That is one
	 * reason for accumulating it in rx_fifo_errors: it is potentially
	 * not a true drop.
	 */
	netdev->stats.rx_fifo_errors += adapter->stats.rnbc;

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
	                                 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
	                          adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
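
/*
 * Encoding notes for the write above (as implied by this function, not a
 * datasheet quote): itr_val is first masked to the 0x7FFC interval field
 * and floored at 0x4 so a pending update is never written as zero.  On
 * parts where itr_shift is set, the interval is replicated into the
 * upper half of the register; otherwise control bit 0x8000000 is OR'd in
 * for the newer EITR layout.
 */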
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
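
/*
 * Shape of the mailbox message parsed above (field positions as implied
 * by E1000_VT_MSGINFO_SHIFT).  The VF side would build it roughly as:
 *
 *   msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);
 *   msgbuf[1..] = n 16-bit multicast hash values, packed two per word;
 *
 * the PF stores up to 30 of those hashes and replays them into the MTA
 * whenever the multicast filter is rewritten.
 */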
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* extract the VLAN ID before clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
					             adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}
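
/*
 * Layout of a VLVF entry as implied by the masks used above: the low
 * bits hold the VLAN ID (E1000_VLVF_VLANID_MASK), one pool-select bit
 * per pool starting at E1000_VLVF_POOLSEL_SHIFT marks the VFs (and
 * optionally the PF pool) that participate, and E1000_VLVF_VLANID_ENABLE
 * activates the entry.  Adding VF 2 to VLAN 100 in a free entry i would
 * therefore write:
 *
 *   reg = 100 | E1000_VLVF_VLANID_ENABLE |
 *         (1 << (E1000_VLVF_POOLSEL_SHIFT + 2));
 *   wr32(E1000_VLVF(i), reg);
 */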
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, ETH_ALEN);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
		        "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}
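
/*
 * Every reply above is the VF's own request word with status bits OR'd
 * in.  A successful E1000_VF_SET_MAC_ADDR, for example, comes back as
 *
 *   msgbuf[0] = E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_ACK |
 *               E1000_VT_MSGTYPE_CTS;
 *
 * a rejected request carries E1000_VT_MSGTYPE_NACK instead of ACK, and
 * CTS tells the VF it may continue sending configuration requests.
 */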
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
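
/*
 * A sketch of how the two handlers above are typically registered when
 * MSI-X is unavailable (illustrative; the real call site is in the
 * driver's interrupt-setup path):
 *
 *	err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
 *			  netdev->name, adapter);
 *	if (err)
 *		-- fall back to legacy INTx, which must be shareable
 *		err = request_irq(adapter->pdev->irq, igb_intr,
 *				  IRQF_SHARED, netdev->name, adapter);
 */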
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
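
/*
 * For reference, a sketch of how igb_poll is bound to each queue vector
 * through NAPI during setup (illustrative; the actual call site lives
 * earlier in the driver):
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 *	...
 *	napi_enable(&q_vector->napi);
 */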
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
						  regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
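
/*
 * The shtx->hardware flag tested above is set on the transmit path when
 * user space requested a hardware time stamp via SO_TIMESTAMPING.  A
 * simplified sketch of the producer side (assumed, condensed from the
 * xmit path):
 *
 *	union skb_shared_tx *shtx = skb_tx(skb);
 *
 *	if (unlikely(shtx->hardware)) {
 *		shtx->in_progress = 1;
 *		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 *	}
 */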
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) &
			 E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
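
/*
 * igb_desc_unused(), used above for the queue-wake threshold, is plain
 * ring arithmetic.  A sketch of the usual implementation (the real helper
 * lives in igb.h):
 *
 *	static inline int igb_desc_unused(struct igb_ring *ring)
 *	{
 *		if (ring->next_to_clean > ring->next_to_use)
 *			return ring->next_to_clean - ring->next_to_use - 1;
 *		return ring->count + ring->next_to_clean - ring->next_to_use - 1;
 *	}
 */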
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
			    struct sk_buff *skb,
			    u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
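
/*
 * Worked example for the decode above, assuming the header-length field
 * occupies bits 14:5 (mask 0x7FE0, shift 5): a raw hdr_info of 0x0500
 * yields (0x0500 & 0x7FE0) >> 5 = 0x28, i.e. a 40-byte header.
 */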
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: pointer to the receive ring to refill
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: pointer to the net device
 * @ifr: interface request containing the MII ioctl data
 * @cmd: the ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
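
/*
 * User-space view of the MII ioctls handled above (an illustrative
 * sketch; error handling omitted and "eth0" is an example name):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	-- fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	-- register value in mii->val_out
 */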
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: pointer to the net device
 * @ifr: interface request containing the user's hwtstamp_config
 * @cmd: the ioctl command (SIOCSHWTSTAMP)
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 *                                          (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
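
/*
 * User-space view of SIOCSHWTSTAMP as handled above (an illustrative
 * sketch; "eth0" is an example name and error handling is omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	-- cfg.rx_filter now reports what the hardware actually enabled,
 *	-- e.g. HWTSTAMP_FILTER_ALL when the driver had to fall back
 */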
/**
 * igb_ioctl - entry point for device ioctls
 * @netdev: pointer to the net device
 * @ifr: interface request
 * @cmd: the ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
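
/*
 * igb_set_spd_dplx() is reached from the ethtool set-settings path when
 * autonegotiation is turned off; e.g. forcing 100 Mb/s full duplex
 * (illustrative):
 *
 *	err = igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
 */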
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
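
/*
 * Worked example of the packing above, for the (example) address
 * 00:1b:21:aa:bb:cc:
 *
 *	rar_low  = 0xaa211b00;	-- addr[3]:addr[2]:addr[1]:addr[0]
 *	rar_high = 0x0000ccbb;	-- addr[5]:addr[4], before E1000_RAH_AV
 *				   and the pool bits are OR'ed in
 */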
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */