e1000_main.c

/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
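/* Exporting the ID table above with MODULE_DEVICE_TABLE() lets userspace
 * (udev/modprobe) match a hot-plugged PCI device against it and autoload
 * this module without manual configuration.
 */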
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
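/* Copybreak trades a memcpy for buffer reuse: on receive, packets no larger
 * than this many bytes are copied into a freshly allocated skb so the much
 * larger DMA-mapped receive buffer can be handed straight back to the ring
 * instead of travelling up the stack.
 */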
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
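/* The default of -1 is deliberately out of range: netif_msg_init() treats it
 * as "unset" and falls back to DEFAULT_MSG_ENABLE above.
 */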
/**
 * e1000_get_hw_dev - return device
 * @hw: pointer to the HW struct
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
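	/* the flush forces the IMC write out to the device before we wait
	 * for any handler still running on another CPU to complete
	 */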
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;
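		/* the MANC check below skips the power-down when the
		 * management controller reaches the PHY over SMBus
		 * (MANC.SMBUS_EN): cutting PHY power would cut off
		 * manageability traffic
		 */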
		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);
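		/* the read-back below picks up the Tx/Rx split the hardware
		 * settled on after the write: the Tx half of PBA is derived
		 * from whatever is left after the Rx allocation just written
		 */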
		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
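	/* e.g. if the Rx allocation ended up at 40 KB with 1522-byte frames,
	 * this is min(40960 * 9 / 10, 40960 - 1522) = min(36864, 39438)
	 * = 36864, which the mask below leaves on an 8-byte boundary
	 */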
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the eeprom for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);
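	/* every 16-bit word from offset 0 up to and including the checksum
	 * word at EEPROM_CHECKSUM_REG must sum (mod 2^16) to EEPROM_SUM
	 * (0xBABA); recompute what the stored checksum would have to be
	 */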
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open = e1000_open,
	.ndo_stop = e1000_close,
	.ndo_start_xmit = e1000_xmit_frame,
	.ndo_get_stats = e1000_get_stats,
	.ndo_set_rx_mode = e1000_set_rx_mode,
	.ndo_set_mac_address = e1000_set_mac,
	.ndo_tx_timeout = e1000_tx_timeout,
	.ndo_change_mtu = e1000_change_mtu,
	.ndo_do_ioctl = e1000_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
#endif
	.ndo_fix_features = e1000_fix_features,
	.ndo_set_features = e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
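	/* use the PCI bus id as the interface name for now so early log
	 * messages can identify the device; register_netdev() later in
	 * probe installs the final netdev name
	 */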
  897. adapter->bd_number = cards_found;
  898. /* setup the private structure */
  899. err = e1000_sw_init(adapter);
  900. if (err)
  901. goto err_sw_init;
  902. err = -EIO;
  903. if (hw->mac_type == e1000_ce4100) {
  904. hw->ce4100_gbe_mdio_base_virt =
  905. ioremap(pci_resource_start(pdev, BAR_1),
  906. pci_resource_len(pdev, BAR_1));
  907. if (!hw->ce4100_gbe_mdio_base_virt)
  908. goto err_mdio_ioremap;
  909. }
  910. if (hw->mac_type >= e1000_82543) {
  911. netdev->hw_features = NETIF_F_SG |
  912. NETIF_F_HW_CSUM |
  913. NETIF_F_HW_VLAN_RX;
  914. netdev->features = NETIF_F_HW_VLAN_TX |
  915. NETIF_F_HW_VLAN_FILTER;
  916. }
  917. if ((hw->mac_type >= e1000_82544) &&
  918. (hw->mac_type != e1000_82547))
  919. netdev->hw_features |= NETIF_F_TSO;
  920. netdev->priv_flags |= IFF_SUPP_NOFCS;
  921. netdev->features |= netdev->hw_features;
  922. netdev->hw_features |= NETIF_F_RXCSUM;
  923. netdev->hw_features |= NETIF_F_RXALL;
  924. netdev->hw_features |= NETIF_F_RXFCS;
  925. if (pci_using_dac) {
  926. netdev->features |= NETIF_F_HIGHDMA;
  927. netdev->vlan_features |= NETIF_F_HIGHDMA;
  928. }
  929. netdev->vlan_features |= NETIF_F_TSO;
  930. netdev->vlan_features |= NETIF_F_HW_CSUM;
  931. netdev->vlan_features |= NETIF_F_SG;
  932. netdev->priv_flags |= IFF_UNICAST_FLT;
  933. adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
  934. /* initialize eeprom parameters */
  935. if (e1000_init_eeprom_params(hw)) {
  936. e_err(probe, "EEPROM initialization failed\n");
  937. goto err_eeprom;
  938. }
	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to a bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
					  EEPROM_INIT_CONTROL3_PORT_B,
					  1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}
	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
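	/* kcalloc() both allocates and zeroes the ring arrays, so every
	 * field of the rings starts out cleared with no explicit memset
	 */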
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
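		/* begin and (end - 1) lie in the same 64 kB region exactly
		 * when their addresses agree above bit 15; XOR-ing them
		 * clears the shared high bits, so a zero result after the
		 * 16-bit shift means the buffer never crosses a boundary
		 */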
		return ((begin ^ (end - 1)) >> 16) == 0;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
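		/* the misaligned allocation stays mapped for the moment so
		 * the allocator cannot hand the very same region back on the
		 * retry; it is released once a usable buffer exists
		 */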
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
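	/* note: the SZ_4096/8192/16384 values are the extended buffer-size
	 * encodings, which are only meaningful while BSEX is set; 2048 uses
	 * a base encoding, so that case below clears BSEX again
	 */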
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */

		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
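		/* the ITR register counts in 256 ns units, so a target rate
		 * of `itr` interrupts/s maps to 10^9 / (itr * 256); e.g.
		 * itr = 8000 int/s programs roughly 488 into the register
		 */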
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}

/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}

static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */
	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}

/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */
	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */
	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
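			/* bits 11:5 of the 32-bit hash select one of the 128
			 * multicast-table (MTA) registers; bits 4:0 select
			 * the bit to set within that register
			 */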
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/* The 82544 has an errata where writing an odd offset
		 * overwrites the previous even offset; writing backwards
		 * over the range sidesteps it by always writing the odd
		 * offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}

/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 **/
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}

/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}

bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status stays set until e1000_check_for_link
	 * establishes link, for copper adapters ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}

/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			bool txb2b = true;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			goto unlock;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
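		/* worked example: dif = 0 (perfectly symmetric traffic)
		 * gives itr = 2000; dif == goc (all traffic one-way) gives
		 * 6000 + 2000 = 8000; no traffic at all also yields 8000
		 */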
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);

unlock:
	mutex_unlock(&adapter->mutex);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packet and byte
 * counts during the last interrupt. The advantage of per-interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
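		/* e.g. stepping up from 4000 to 20000 first programs
		 * min(4000 + (20000 >> 2), 20000) = 9000, approaching the
		 * target over several adjustments instead of jumping there
		 */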
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
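		/* everything below fills a single context descriptor that
		 * tells the MAC where the IP and TCP checksum fields live
		 * (ipcss/ipcso/ipcse, tucss/tucso/tucse) and how to segment
		 * the payload (mss, hdr_len)
		 */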
		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;
	tx_ring->next_to_use = i;

	return true;
}

#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode. Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X. Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
  2455. if (unlikely(adapter->pcix_82544 &&
  2456. !((unsigned long)(skb->data + offset + size - 1) & 4) &&
  2457. size > 4))
  2458. size -= 4;
  2459. buffer_info->length = size;
  2460. /* set time_stamp *before* dma to help avoid a possible race */
  2461. buffer_info->time_stamp = jiffies;
  2462. buffer_info->mapped_as_page = false;
  2463. buffer_info->dma = dma_map_single(&pdev->dev,
  2464. skb->data + offset,
  2465. size, DMA_TO_DEVICE);
  2466. if (dma_mapping_error(&pdev->dev, buffer_info->dma))
  2467. goto dma_error;
  2468. buffer_info->next_to_watch = i;
  2469. len -= size;
  2470. offset += size;
  2471. count++;
  2472. if (len) {
  2473. i++;
  2474. if (unlikely(i == tx_ring->count))
  2475. i = 0;
  2476. }
  2477. }
  2478. for (f = 0; f < nr_frags; f++) {
  2479. const struct skb_frag_struct *frag;
  2480. frag = &skb_shinfo(skb)->frags[f];
  2481. len = skb_frag_size(frag);
  2482. offset = 0;
  2483. while (len) {
  2484. unsigned long bufend;
  2485. i++;
  2486. if (unlikely(i == tx_ring->count))
  2487. i = 0;
  2488. buffer_info = &tx_ring->buffer_info[i];
  2489. size = min(len, max_per_txd);
  2490. /* Workaround for premature desc write-backs
  2491. * in TSO mode. Append 4-byte sentinel desc */
  2492. if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
  2493. size -= 4;
  2494. /* Workaround for potential 82544 hang in PCI-X.
  2495. * Avoid terminating buffers within evenly-aligned
  2496. * dwords. */
  2497. bufend = (unsigned long)
  2498. page_to_phys(skb_frag_page(frag));
  2499. bufend += offset + size - 1;
  2500. if (unlikely(adapter->pcix_82544 &&
  2501. !(bufend & 4) &&
  2502. size > 4))
  2503. size -= 4;
  2504. buffer_info->length = size;
  2505. buffer_info->time_stamp = jiffies;
  2506. buffer_info->mapped_as_page = true;
  2507. buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
  2508. offset, size, DMA_TO_DEVICE);
  2509. if (dma_mapping_error(&pdev->dev, buffer_info->dma))
  2510. goto dma_error;
  2511. buffer_info->next_to_watch = i;
  2512. len -= size;
  2513. offset += size;
  2514. count++;
  2515. }
  2516. }
  2517. segs = skb_shinfo(skb)->gso_segs ?: 1;
  2518. /* multiply data chunks by size of headers */
  2519. bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
  2520. tx_ring->buffer_info[i].skb = skb;
  2521. tx_ring->buffer_info[i].segs = segs;
  2522. tx_ring->buffer_info[i].bytecount = bytecount;
  2523. tx_ring->buffer_info[first].next_to_watch = i;
  2524. return count;
  2525. dma_error:
  2526. dev_err(&pdev->dev, "TX DMA map failed\n");
  2527. buffer_info->dma = 0;
  2528. if (count)
  2529. count--;
  2530. while (count--) {
  2531. if (i==0)
  2532. i += tx_ring->count;
  2533. i--;
  2534. buffer_info = &tx_ring->buffer_info[i];
  2535. e1000_unmap_and_free_tx_resource(adapter, buffer_info);
  2536. }
  2537. return 0;
  2538. }
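/* Editor's illustrative sketch, not part of the upstream driver: the
 * dma_error path above walks the ring backwards from the failing slot,
 * and the "i += count; i--" dance is simply a modular decrement.  The
 * hypothetical helper below is a stand-alone form of that index step.
 */
static inline unsigned int e1000_ring_index_prev(unsigned int i,
						 unsigned int ring_count)
{
	/* wrap from slot 0 back to the last slot of the ring */
	if (i == 0)
		i = ring_count;
	return i - 1;
}
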
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
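/* Editor's illustrative sketch, not part of the upstream driver: how the
 * per-descriptor command/option words built above combine for a TSO IPv4
 * frame carrying a VLAN tag.  This is pure bit arithmetic over the real
 * E1000_TXD_* flags; the function itself and its use are hypothetical.
 */
static inline void e1000_txd_words_example(u32 *lower, u32 *upper, u16 vid)
{
	*lower = E1000_TXD_CMD_IFCS |		/* insert Ethernet FCS */
		 E1000_TXD_CMD_DEXT |		/* extended descriptor */
		 E1000_TXD_DTYP_D |		/* data (not context) desc */
		 E1000_TXD_CMD_TSE |		/* TCP segmentation enable */
		 E1000_TXD_CMD_VLE;		/* insert VLAN tag */
	*upper = (E1000_TXD_POPTS_TXSM << 8) |	/* insert TCP/UDP csum */
		 (E1000_TXD_POPTS_IXSM << 8) |	/* insert IP csum */
		 ((u32)vid << 16);		/* VLAN tag in bits 31:16 */
}
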
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
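/* Editor's illustrative sketch, not part of the upstream driver: what the
 * workaround above actually computes.  A frame occupies its length plus a
 * 16-byte hardware header in the Tx FIFO, rounded up to a 16-byte
 * multiple, and the head pointer advances modulo the FIFO size.  The
 * helper is hypothetical; the constants mirror E1000_FIFO_HDR above.
 */
static inline u32 e1000_fifo_advance_example(u32 head, u32 fifo_size,
					     u32 skb_len)
{
	u32 fifo_len = ALIGN(skb_len + E1000_FIFO_HDR, E1000_FIFO_HDR);

	head += fifo_len;
	if (head >= fifo_size)
		head -= fifo_size;	/* wrap within the FIFO ring */
	return head;
}
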
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
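/* Editor's illustrative sketch, not part of the upstream driver:
 * TXD_USE_COUNT over-counts by design -- for S bytes and 2^X-byte
 * descriptors it reserves (S >> X) + 1 slots, so 4096 bytes with X = 12
 * reserves 2 even though one descriptor would do.  The xmit path below
 * prefers a cheap, slightly pessimistic reservation to an exact divide.
 * Hypothetical helper demonstrating the arithmetic.
 */
static inline unsigned int e1000_txd_use_count_example(void)
{
	unsigned int s = 4096, x = E1000_MAX_TXD_PWR;	/* x == 12 */

	return TXD_USE_COUNT(s, x);	/* (4096 >> 12) + 1 == 2 */
}
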
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				/* fall through */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
#define NUM_REGS 38 /* 1 based count */
static void e1000_regdump(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 regs[NUM_REGS];
	u32 *regs_buff = regs;
	int i = 0;

	static const char * const reg_name[] = {
		"CTRL", "STATUS",
		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
		"TIDV", "TXDCTL", "TADV", "TARC0",
		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
		"TXDCTL1", "TARC1",
		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
	};

	regs_buff[0]  = er32(CTRL);
	regs_buff[1]  = er32(STATUS);

	regs_buff[2]  = er32(RCTL);
	regs_buff[3]  = er32(RDLEN);
	regs_buff[4]  = er32(RDH);
	regs_buff[5]  = er32(RDT);
	regs_buff[6]  = er32(RDTR);

	regs_buff[7]  = er32(TCTL);
	regs_buff[8]  = er32(TDBAL);
	regs_buff[9]  = er32(TDBAH);
	regs_buff[10] = er32(TDLEN);
	regs_buff[11] = er32(TDH);
	regs_buff[12] = er32(TDT);
	regs_buff[13] = er32(TIDV);
	regs_buff[14] = er32(TXDCTL);
	regs_buff[15] = er32(TADV);
	regs_buff[16] = er32(TARC0);

	regs_buff[17] = er32(TDBAL1);
	regs_buff[18] = er32(TDBAH1);
	regs_buff[19] = er32(TDLEN1);
	regs_buff[20] = er32(TDH1);
	regs_buff[21] = er32(TDT1);
	regs_buff[22] = er32(TXDCTL1);
	regs_buff[23] = er32(TARC1);
	regs_buff[24] = er32(CTRL_EXT);
	regs_buff[25] = er32(ERT);
	regs_buff[26] = er32(RDBAL0);
	regs_buff[27] = er32(RDBAH0);
	regs_buff[28] = er32(TDFH);
	regs_buff[29] = er32(TDFT);
	regs_buff[30] = er32(TDFHS);
	regs_buff[31] = er32(TDFTS);
	regs_buff[32] = er32(TDFPC);
	regs_buff[33] = er32(RDFH);
	regs_buff[34] = er32(RDFT);
	regs_buff[35] = er32(RDFHS);
	regs_buff[36] = er32(RDFTS);
	regs_buff[37] = er32(RDFPC);

	pr_info("Register dump\n");
	for (i = 0; i < NUM_REGS; i++)
		pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
}
/*
 * e1000_dump - Print registers, tx ring and rx ring
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/*
	 * transmit dump
	 */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |        Buffer Address [63:0] (Reserved on Write Back)        |
	 *   +--------------------------------------------------------------+
	 * 8 | Special |   CSS    | Status |  CMD   |  CSO  |    Length     |
	 *   +--------------------------------------------------------------+
	 *   63      48 47      36 35    32 31    24 23   16 15             0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31            16 15    8 7     0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE      | IPCS0 | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |     PAYLEN       |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39  36 35 32 31  24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP |   DTALEN   |
	 *   +----------------------------------------------------------------+
	 *   63       48 47   40 39  36 35    32 31     24 23  20 19          0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/*
	 * receive dump
	 */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                 |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length  |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31          16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->skb, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	e_err(drv, "Reset adapter\n");
	e1000_reinit_safe(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
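/* Editor's illustrative sketch, not part of the upstream driver: the
 * buffer-size selection above, restated.  Frames up to 2048 bytes share
 * the 2048-byte receive buffer; anything larger falls through to the
 * page-based jumbo path, so the buffer length is bounded by PAGE_SIZE.
 * The helper is hypothetical and assumes a 4096-byte PAGE_SIZE, i.e. the
 * #elif branch of the conditional above.
 */
static inline unsigned long e1000_rx_buffer_len_example(int max_frame)
{
	if (max_frame <= E1000_RXBUFFER_2048)
		return E1000_RXBUFFER_2048;
	return PAGE_SIZE;	/* jumbo receives use fragmented skbs */
}
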
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */
	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
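/* Editor's illustrative sketch, not part of the upstream driver: several
 * of the counters read above come in 32-bit low/high pairs (e.g.
 * GORCL/GORCH for good octets received, TORL/TORH for total octets).
 * Presenting such a pair as one value is a shift and an OR; the helper
 * itself is hypothetical, the register semantics follow from the update
 * loop above.
 */
static inline u64 e1000_stat_pair_example(u32 lo, u32 hi)
{
	return ((u64)hi << 32) | lo;
}
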
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing references to driver info
 * @budget: max amount of work allowed for this call to do
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi,
						     struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count)) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang; note that pointer
			 * subtraction already yields an element count, so
			 * the queue index must not be divided by sizeof */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543)) return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}
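/* Editor's illustrative sketch, not part of the upstream driver: callers
 * of e1000_rx_checksum() pack the descriptor's 8-bit status and 8-bit
 * error fields into one u32 with the errors in bits 31:24, which is
 * exactly what the (u16)/(>> 24) casts above unpack.  Hypothetical
 * helper showing the packing side.
 */
static inline u32 e1000_pack_status_err_example(u8 status, u8 errors)
{
	return (u32)status | ((u32)errors << 24);
}
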
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped;
			u8 last_byte;

			mapped = page_address(buffer_info->page);
			last_byte = *(mapped + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				/* the data lives in the page here, not in
				 * skb->data, so adjust stats against the
				 * mapped page address */
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, mapped);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
  3788. /**
  3789. * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  3790. * @adapter: address of board private structure
  3791. **/
  3792. static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
  3793. struct e1000_rx_ring *rx_ring,
  3794. int cleaned_count)
  3795. {
  3796. struct e1000_hw *hw = &adapter->hw;
  3797. struct net_device *netdev = adapter->netdev;
  3798. struct pci_dev *pdev = adapter->pdev;
  3799. struct e1000_rx_desc *rx_desc;
  3800. struct e1000_buffer *buffer_info;
  3801. struct sk_buff *skb;
  3802. unsigned int i;
  3803. unsigned int bufsz = adapter->rx_buffer_len;
  3804. i = rx_ring->next_to_use;
  3805. buffer_info = &rx_ring->buffer_info[i];
  3806. while (cleaned_count--) {
  3807. skb = buffer_info->skb;
  3808. if (skb) {
  3809. skb_trim(skb, 0);
  3810. goto map_skb;
  3811. }
  3812. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  3813. if (unlikely(!skb)) {
  3814. /* Better luck next round */
  3815. adapter->alloc_rx_buff_failed++;
  3816. break;
  3817. }
  3818. /* Fix for errata 23, can't cross 64kB boundary */
  3819. if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
  3820. struct sk_buff *oldskb = skb;
  3821. e_err(rx_err, "skb align check failed: %u bytes at "
  3822. "%p\n", bufsz, skb->data);
  3823. /* Try again, without freeing the previous */
  3824. skb = netdev_alloc_skb_ip_align(netdev, bufsz);
  3825. /* Failed allocation, critical failure */
  3826. if (!skb) {
  3827. dev_kfree_skb(oldskb);
  3828. adapter->alloc_rx_buff_failed++;
  3829. break;
  3830. }
  3831. if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
  3832. /* give up */
  3833. dev_kfree_skb(skb);
  3834. dev_kfree_skb(oldskb);
  3835. adapter->alloc_rx_buff_failed++;
  3836. break; /* while !buffer_info->skb */
  3837. }
  3838. /* Use new allocation */
  3839. dev_kfree_skb(oldskb);
  3840. }
  3841. buffer_info->skb = skb;
  3842. buffer_info->length = adapter->rx_buffer_len;
  3843. map_skb:
  3844. buffer_info->dma = dma_map_single(&pdev->dev,
  3845. skb->data,
  3846. buffer_info->length,
  3847. DMA_FROM_DEVICE);
  3848. if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
  3849. dev_kfree_skb(skb);
  3850. buffer_info->skb = NULL;
  3851. buffer_info->dma = 0;
  3852. adapter->alloc_rx_buff_failed++;
  3853. break; /* while !buffer_info->skb */
  3854. }
		/*
		 * Fix for errata 23, can't cross 64kB boundary.  A buffer
		 * that passed the virtual-address check above should never
		 * map to a boundary-crossing DMA address, so this re-check
		 * is defensive.
		 */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}
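	/*
	 * If any descriptors were refilled, record the new next_to_use
	 * and publish the index of the last initialized descriptor to
	 * the tail register, so hardware never fetches a slot software
	 * has not prepared.
	 */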
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;
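	/*
	 * adapter->smartspeed counts invocations of this routine: pass 0
	 * watches for a repeated master/slave config fault and, if one is
	 * seen while master mode is forced, stops forcing it and restarts
	 * autoneg; if there is still no link by pass
	 * E1000_SMARTSPEED_DOWNSHIFT the forced setting is restored, and
	 * after E1000_SMARTSPEED_MAX passes the counter resets so the
	 * cycle can start over.
	 */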
	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}

	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
/**
 * e1000_ioctl - handle device-specific ioctl requests
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * e1000_mii_ioctl - perform MII register reads and writes
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the mii_ioctl_data
 * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;
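	/*
	 * Only copper parts expose a PHY over MII management; fiber and
	 * serdes media have nothing to talk to here.
	 */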
	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
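		/*
		 * A raw register write can change the link configuration
		 * behind the driver's back; mirror the relevant PHY_CTRL
		 * settings into hw state and reset or reinitialize the
		 * MAC so both agree.
		 */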
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
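					/*
					 * Decode the forced setting from
					 * the standard BMCR bits: 0x40 is
					 * speed-select 1000, 0x2000 is
					 * speed-select 100, and 0x100 is
					 * full duplex.
					 */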
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;
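	/* any bit set in active_vlans means at least one VLAN is in use */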
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}
static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
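	/*
	 * E1000_RCTL_VFE enables VLAN filtering against the VFTA table
	 * (left off in promiscuous mode); E1000_RCTL_CFIEN is cleared so
	 * the canonical form indicator bit is not also matched.
	 */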
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);
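	/*
	 * The VLAN filter table array holds one bit per VID in 128
	 * 32-bit registers: VID bits 11:5 select the register and
	 * bits 4:0 the bit within it.
	 */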
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;
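	/*
	 * With that guaranteed, spd + dplx identifies the combination
	 * unambiguously, e.g. SPEED_100 (100) + DUPLEX_FULL (1) == 101
	 * can only mean 100 Mb/s full duplex.
	 */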
	/* Fiber NICs only allow 1000 Mb/s full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
		goto err_inval;
	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;
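	/*
	 * When the link is already up, waking on a link status change is
	 * pointless, so that wake filter was just masked out above.
	 */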
	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);
		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);
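	/*
	 * Arm D3 wake only when the machine is really powering off; on
	 * reboot the device is left alone so firmware can reinitialize
	 * it cleanly.
	 */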
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */