e1000_main.c

/*******************************************************************************

  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"

/* Change Log
 * 6.0.58   4/20/05
 *   o Accepted ethtool cleanup patch from Stephen Hemminger
 * 6.0.44+  2/15/05
 *   o Applied Anton's patch to resolve tx hang in hardware
 *   o Applied Andrew Morton's patch - e1000 stops working after resume
 */

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
        INTEL_E1000_ETHERNET_DEVICE(0x1009),
        INTEL_E1000_ETHERNET_DEVICE(0x100C),
        INTEL_E1000_ETHERNET_DEVICE(0x100D),
        INTEL_E1000_ETHERNET_DEVICE(0x100E),
        INTEL_E1000_ETHERNET_DEVICE(0x100F),
        INTEL_E1000_ETHERNET_DEVICE(0x1010),
        INTEL_E1000_ETHERNET_DEVICE(0x1011),
        INTEL_E1000_ETHERNET_DEVICE(0x1012),
        INTEL_E1000_ETHERNET_DEVICE(0x1013),
        INTEL_E1000_ETHERNET_DEVICE(0x1014),
        INTEL_E1000_ETHERNET_DEVICE(0x1015),
        INTEL_E1000_ETHERNET_DEVICE(0x1016),
        INTEL_E1000_ETHERNET_DEVICE(0x1017),
        INTEL_E1000_ETHERNET_DEVICE(0x1018),
        INTEL_E1000_ETHERNET_DEVICE(0x1019),
        INTEL_E1000_ETHERNET_DEVICE(0x101A),
        INTEL_E1000_ETHERNET_DEVICE(0x101D),
        INTEL_E1000_ETHERNET_DEVICE(0x101E),
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
        INTEL_E1000_ETHERNET_DEVICE(0x105E),
        INTEL_E1000_ETHERNET_DEVICE(0x105F),
        INTEL_E1000_ETHERNET_DEVICE(0x1060),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
        INTEL_E1000_ETHERNET_DEVICE(0x1078),
        INTEL_E1000_ETHERNET_DEVICE(0x1079),
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
        INTEL_E1000_ETHERNET_DEVICE(0x107D),
        INTEL_E1000_ETHERNET_DEVICE(0x107E),
        INTEL_E1000_ETHERNET_DEVICE(0x107F),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x108B),
        INTEL_E1000_ETHERNET_DEVICE(0x108C),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x109A),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);
/* Local Function Prototypes */
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_MQ
static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
#endif
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_watchdog_task(struct e1000_adapter *adapter);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *poll_dev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring,
                                    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                       struct e1000_rx_ring *rx_ring,
                                       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                       struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                      struct e1000_rx_ring *rx_ring,
                                      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
void e1000_set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                              struct sk_buff *skb);
static void e1000_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
#ifdef CONFIG_E1000_MQ
/* for multiple Rx queues */
void e1000_rx_schedule(void *data);
#endif

/* Exported from other modules */
extern void e1000_check_options(struct e1000_adapter *adapter);
static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
        /* Power Management Hooks */
#ifdef CONFIG_PM
        .suspend  = e1000_suspend,
        .resume   = e1000_resume
#endif
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init
e1000_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               e1000_driver_string, e1000_driver_version);

        printk(KERN_INFO "%s\n", e1000_copyright);

        ret = pci_module_init(&e1000_driver);

        return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit
e1000_exit_module(void)
{
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
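
/* Interrupt masking is reference counted through irq_sem: each
 * e1000_irq_disable() bumps the count and masks every interrupt cause
 * via IMC, while e1000_irq_enable() re-arms IMS only once the count
 * drops back to zero, so nested disable/enable pairs balance. */
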
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void
e1000_irq_disable(struct e1000_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        E1000_WRITE_REG(&adapter->hw, IMC, ~0);
        E1000_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
        if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
                E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
                E1000_WRITE_FLUSH(&adapter->hw);
        }
}
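
/* Keep the manageability (DHCP cookie) VLAN registered with the stack:
 * if firmware reports a management VLAN that is not already configured
 * as a vlan device it is added here, and a previously tracked
 * management VLAN is removed once it is no longer needed. */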
static void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint16_t vid = adapter->hw.mng_cookie.vlan_id;
        uint16_t old_vid = adapter->mng_vlan_id;

        if(adapter->vlgrp) {
                if(!adapter->vlgrp->vlan_devices[vid]) {
                        if(adapter->hw.mng_cookie.status &
                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

                        if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
                           (vid != old_vid) &&
                           !adapter->vlgrp->vlan_devices[old_vid])
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                }
        }
}
/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/
static inline void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
        uint32_t ctrl_ext;
        uint32_t swsm;

        /* Let firmware take over control of h/w */
        switch (adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
                break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm & ~E1000_SWSM_DRV_LOAD);
                break;
        default:
                break;
        }
}
/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/
static inline void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
        uint32_t ctrl_ext;
        uint32_t swsm;

        /* Let firmware know the driver has taken over */
        switch (adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm | E1000_SWSM_DRV_LOAD);
                break;
        default:
                break;
        }
}
int
e1000_up(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, err;

        /* hardware has been reset, we need to reload some things */

        /* Reset the PHY if it was previously powered down */
        if(adapter->hw.media_type == e1000_media_type_copper) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                if(mii_reg & MII_CR_POWER_DOWN)
                        e1000_phy_reset(&adapter->hw);
        }

        e1000_set_multi(netdev);

        e1000_restore_vlan(adapter);

        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
        /* call E1000_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
                adapter->alloc_rx_buf(adapter, ring,
                                      E1000_DESC_UNUSED(ring));
        }

#ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2) {
                adapter->have_msi = TRUE;
                if((err = pci_enable_msi(adapter->pdev))) {
                        DPRINTK(PROBE, ERR,
                                "Unable to allocate MSI interrupt Error: %d\n",
                                err);
                        adapter->have_msi = FALSE;
                }
        }
#endif
        if((err = request_irq(adapter->pdev->irq, &e1000_intr,
                              SA_SHIRQ | SA_SAMPLE_RANDOM,
                              netdev->name, netdev))) {
                DPRINTK(PROBE, ERR,
                        "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }

#ifdef CONFIG_E1000_MQ
        e1000_setup_queue_mapping(adapter);
#endif

        adapter->tx_queue_len = netdev->tx_queue_len;

        mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_E1000_NAPI
        netif_poll_enable(netdev);
#endif
        e1000_irq_enable(adapter);

        return 0;
}
void
e1000_down(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
                                     e1000_check_mng_mode(&adapter->hw);

        e1000_irq_disable(adapter);
#ifdef CONFIG_E1000_MQ
        while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
#endif
        free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2 &&
           adapter->have_msi == TRUE)
                pci_disable_msi(adapter->pdev);
#endif
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
        netif_poll_disable(netdev);
#endif
        netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);

        /* Power down the PHY so no link is implied when interface is down.
         * The PHY cannot be powered down if any of the following is TRUE:
         * (a) WoL is enabled
         * (b) AMT is active
         * (c) SoL/IDER session is active */
        if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
            adapter->hw.media_type == e1000_media_type_copper &&
            !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
            !mng_mode_enabled &&
            !e1000_check_phy_reset_block(&adapter->hw)) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
                e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
                mdelay(1);
        }
}
void
e1000_reset(struct e1000_adapter *adapter)
{
        uint32_t pba, manc;
        uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;

        /* Repartition PBA for MTUs greater than 9k.
         * CTRL.RST is required for the change to take effect.
         */
        switch (adapter->hw.mac_type) {
        case e1000_82547:
        case e1000_82547_rev_2:
                pba = E1000_PBA_30K;
                break;
        case e1000_82571:
        case e1000_82572:
                pba = E1000_PBA_38K;
                break;
        case e1000_82573:
                pba = E1000_PBA_12K;
                break;
        default:
                pba = E1000_PBA_48K;
                break;
        }

        if((adapter->hw.mac_type != e1000_82573) &&
           (adapter->netdev->mtu > E1000_RXBUFFER_8192))
                pba -= 8; /* allocate more FIFO for Tx */
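
        /* Judging by E1000_PBA_40K, the 82547 packet buffer is 40KB total;
         * whatever PBA does not assign to Rx above remains for Tx, and its
         * size is recorded here for the 82547 Tx FIFO stall workaround. */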
        if(adapter->hw.mac_type == e1000_82547) {
                adapter->tx_fifo_head = 0;
                adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                adapter->tx_fifo_size =
                        (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
                atomic_set(&adapter->tx_fifo_stall, 0);
        }

        E1000_WRITE_REG(&adapter->hw, PBA, pba);

        /* flow control settings */
        /* Set the FC high water mark to 90% of the FIFO size.
         * Required to clear last 3 LSB */
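        /* pba is in KB, so (pba * 9216) / 10 is 90% of the FIFO in bytes
         * (1024 * 9 / 10 = 921.6); the 0xFFF8 mask clears the low three
         * bits, rounding down to 8-byte granularity. */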
        fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;

        adapter->hw.fc_high_water = fc_high_water_mark;
        adapter->hw.fc_low_water = fc_high_water_mark - 8;
        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
        adapter->hw.fc_send_xon = 1;
        adapter->hw.fc = adapter->hw.original_fc;

        /* Allow time for pending master requests to run */
        e1000_reset_hw(&adapter->hw);
        if(adapter->hw.mac_type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
        if(e1000_init_hw(&adapter->hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

        e1000_reset_adaptive(&adapter->hw);
        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

        if (adapter->en_mng_pt) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
                E1000_WRITE_REG(&adapter->hw, MANC, manc);
        }
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuration of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit
e1000_probe(struct pci_dev *pdev,
            const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
        static int cards_found = 0;
        int i, err, pci_using_dac;
        uint16_t eeprom_data;
        uint16_t eeprom_apme_mask = E1000_EEPROM_APME;

        if((err = pci_enable_device(pdev)))
                return err;
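
        /* Prefer a 64-bit DMA mask so packet buffers anywhere in physical
         * memory can be mapped directly (pci_using_dac later gates
         * NETIF_F_HIGHDMA); fall back to 32-bit addressing otherwise. */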
        if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
                if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        return err;
                }
                pci_using_dac = 0;
        }

        if((err = pci_request_regions(pdev, e1000_driver_name)))
                return err;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
        if(!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
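
        /* Turn the module-level 'debug' number into a netif message-level
         * bitmask: level n enables every message type in bits 0..n-1. */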
        adapter->msg_enable = (1 << debug) - 1;

        mmio_start = pci_resource_start(pdev, BAR_0);
        mmio_len = pci_resource_len(pdev, BAR_0);

        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
        if(!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for(i = BAR_1; i <= BAR_5; i++) {
                if(pci_resource_len(pdev, i) == 0)
                        continue;
                if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->open = &e1000_open;
        netdev->stop = &e1000_close;
        netdev->hard_start_xmit = &e1000_xmit_frame;
        netdev->get_stats = &e1000_get_stats;
        netdev->set_multicast_list = &e1000_set_multi;
        netdev->set_mac_address = &e1000_set_mac;
        netdev->change_mtu = &e1000_change_mtu;
        netdev->do_ioctl = &e1000_ioctl;
        e1000_set_ethtool_ops(netdev);
        netdev->tx_timeout = &e1000_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
        netdev->poll = &e1000_clean;
        netdev->weight = 64;
#endif
        netdev->vlan_rx_register = e1000_vlan_rx_register;
        netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = e1000_netpoll;
#endif
        strcpy(netdev->name, pci_name(pdev));

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;
        netdev->base_addr = adapter->hw.io_base;

        adapter->bd_number = cards_found;

        /* setup the private structure */
        if((err = e1000_sw_init(adapter)))
                goto err_sw_init;

        if((err = e1000_check_phy_reset_block(&adapter->hw)))
                DPRINTK(PROBE, INFO,
                        "PHY reset is blocked due to SOL/IDER session.\n");

        if(adapter->hw.mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
        }

#ifdef NETIF_F_TSO
        if((adapter->hw.mac_type >= e1000_82544) &&
           (adapter->hw.mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
        if(adapter->hw.mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
        if(pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        /* hard_start_xmit is safe against parallel locking */
        netdev->features |= NETIF_F_LLTX;

        adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */
        e1000_reset_hw(&adapter->hw);

        /* make sure the EEPROM is good */
        if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the EEPROM */
        if(e1000_read_mac_addr(&adapter->hw))
                DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

        if(!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        e1000_read_part_num(&adapter->hw, &(adapter->part_num));

        e1000_get_bus_info(&adapter->hw);

        init_timer(&adapter->tx_fifo_stall_timer);
        adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
        adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->watchdog_task,
                  (void (*)(void *))e1000_watchdog_task, adapter);

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->tx_timeout_task,
                  (void (*)(void *))e1000_tx_timeout_task, netdev);

        /* we're going to reset, so assume we have no link for now */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_check_options(adapter);

        /* Initial Wake on LAN setting
         * If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */
        switch(adapter->hw.mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
                break;
        case e1000_82544:
                e1000_read_eeprom(&adapter->hw,
                                  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
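                /* These are dual-port parts; when this PCI function is the
                 * second port (STATUS.FUNC_1), its wake-up setting lives in
                 * the port-B init control word of the shared EEPROM. */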
                if(E1000_READ_REG(&adapter->hw, STATUS) &
                   E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(&adapter->hw,
                                          EEPROM_INIT_CONTROL3_PORT_B,
                                          1, &eeprom_data);
                        break;
                }
                /* Fall Through */
        default:
                e1000_read_eeprom(&adapter->hw,
                                  EEPROM_INIT_CONTROL3_PORT_A,
                                  1, &eeprom_data);
                break;
        }
        if(eeprom_data & eeprom_apme_mask)
                adapter->wol |= E1000_WUFC_MAG;

        /* print bus type/speed/width info */
        {
                struct e1000_hw *hw = &adapter->hw;
                DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
                        ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
                         (hw->bus_type == e1000_bus_type_pci_express ?
                          " Express" : "")),
                        ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
                         (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
                         (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
                         (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
                         (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" :
                         "33MHz"),
                        ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
                         (hw->bus_width == e1000_bus_width_pciex_4) ?
                         "Width x4" :
                         (hw->bus_width == e1000_bus_width_pciex_1) ?
                         "Width x1" : "32-bit"));
        }

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');

        /* reset the hardware with the new settings */
        e1000_reset(adapter);

        /* If the controller is 82573 and f/w is AMT, do not set
         * DRV_LOAD until the interface is up. For all other cases,
         * let the f/w know that the h/w is now under the control
         * of the driver. */
        if (adapter->hw.mac_type != e1000_82573 ||
            !e1000_check_mng_mode(&adapter->hw))
                e1000_get_hw_control(adapter);

        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
                goto err_register;

        DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
        return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit
e1000_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        uint32_t manc;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        flush_scheduled_work();

        if(adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                if(manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
                }
        }

        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        e1000_release_hw_control(adapter);

        unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
        for (i = 0; i < adapter->num_rx_queues; i++)
                __dev_put(&adapter->polling_netdev[i]);
#endif

        if(!e1000_check_phy_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
        kfree(adapter->polling_netdev);
#endif

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

#ifdef CONFIG_E1000_MQ
        free_percpu(adapter->cpu_netdev);
        free_percpu(adapter->cpu_tx_ring);
#endif
        free_netdev(netdev);

        pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
        int i;
#endif

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        adapter->rx_buffer_len = E1000_RXBUFFER_2048;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        /* identify the MAC */
        if(e1000_set_mac_type(hw)) {
                DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
                return -EIO;
        }

        /* initialize eeprom parameters */
        if(e1000_init_eeprom_params(hw)) {
                E1000_ERR("EEPROM initialization failed\n");
                return -EIO;
        }

        switch(hw->mac_type) {
        default:
                break;
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
        case e1000_82547_rev_2:
                hw->phy_init_script = 1;
                break;
        }

        e1000_set_media_type(hw);

        hw->wait_autoneg_complete = FALSE;
        hw->tbi_compatibility_en = TRUE;
        hw->adaptive_ifs = TRUE;

        /* Copper options */
        if(hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = FALSE;
                hw->master_slave = E1000_MASTER_SLAVE;
        }

#ifdef CONFIG_E1000_MQ
        /* Number of supported queues */
        switch (hw->mac_type) {
        case e1000_82571:
        case e1000_82572:
                /* These controllers support 2 tx queues, but with a single
                 * qdisc implementation, multiple tx queues aren't quite as
                 * interesting. If we can find a logical way of mapping
                 * flows to a queue, then perhaps we can up the num_tx_queue
                 * count back to its default. Until then, we run the risk of
                 * terrible performance due to SACK overload. */
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 2;
                break;
        default:
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                break;
        }
        adapter->num_rx_queues = min(adapter->num_rx_queues,
                                     num_online_cpus());
        adapter->num_tx_queues = min(adapter->num_tx_queues,
                                     num_online_cpus());
        DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
                adapter->num_rx_queues,
                ((adapter->num_rx_queues == 1)
                 ? ((num_online_cpus() > 1)
                    ? "(due to unsupported feature in current adapter)"
                    : "(due to unsupported system configuration)")
                 : ""));
        DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
                adapter->num_tx_queues);
#else
        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;
#endif

        if (e1000_alloc_queues(adapter)) {
                DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

#ifdef CONFIG_E1000_NAPI
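        /* Each Rx queue gets its own lightweight net_device so that this
         * kernel generation's per-netdev NAPI core can poll the queues
         * independently; the devices are held but never registered. */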
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->polling_netdev[i].priv = adapter;
                adapter->polling_netdev[i].poll = &e1000_clean;
                adapter->polling_netdev[i].weight = 64;
                dev_hold(&adapter->polling_netdev[i]);
                set_bit(__LINK_STATE_START,
                        &adapter->polling_netdev[i].state);
        }
        spin_lock_init(&adapter->tx_queue_lock);
#endif

        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->stats_lock);

        return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
        int size;

        size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
        adapter->tx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
        memset(adapter->tx_ring, 0, size);

        size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
        adapter->rx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }
        memset(adapter->rx_ring, 0, size);

#ifdef CONFIG_E1000_NAPI
        size = sizeof(struct net_device) * adapter->num_rx_queues;
        adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
        if (!adapter->polling_netdev) {
                kfree(adapter->tx_ring);
                kfree(adapter->rx_ring);
                return -ENOMEM;
        }
        memset(adapter->polling_netdev, 0, size);
#endif

#ifdef CONFIG_E1000_MQ
        adapter->rx_sched_call_data.func = e1000_rx_schedule;
        adapter->rx_sched_call_data.info = adapter->netdev;

        adapter->cpu_netdev = alloc_percpu(struct net_device *);
        adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
#endif

        return E1000_SUCCESS;
}
#ifdef CONFIG_E1000_MQ
static void __devinit
e1000_setup_queue_mapping(struct e1000_adapter *adapter)
{
        int i, cpu;

        adapter->rx_sched_call_data.func = e1000_rx_schedule;
        adapter->rx_sched_call_data.info = adapter->netdev;
        cpus_clear(adapter->rx_sched_call_data.cpumask);

        adapter->cpu_netdev = alloc_percpu(struct net_device *);
        adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);

        lock_cpu_hotplug();
        i = 0;
        for_each_online_cpu(cpu) {
                *per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
                        &adapter->tx_ring[i % adapter->num_tx_queues];
                /* This is incomplete because we'd like to assign separate
                 * physical cpus to these netdev polling structures and
                 * avoid saturating a subset of cpus.
                 */
                if (i < adapter->num_rx_queues) {
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) =
                                &adapter->polling_netdev[i];
                        adapter->rx_ring[i].cpu = cpu;
                        cpu_set(cpu, adapter->cpumask);
                } else
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;

                i++;
        }
        unlock_cpu_hotplug();
}
#endif
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */
	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	if ((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return E1000_SUCCESS;

err_up:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_down(adapter);
	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static inline boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;
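	/* The buffer crosses a 64 KB boundary exactly when its first and
	 * last bytes differ somewhere in bits 16 and up, which is what
	 * ((begin ^ (end - 1)) >> 16) != 0 tests below.  For example,
	 * begin = 0x1fff0 and len = 0x20 give end - 1 = 0x2000f; the XOR
	 * is 0x3ffff and the shift leaves 0x3, so that buffer fails. */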
	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
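		/* Keeping the misaligned block allocated during the retry
		 * guarantees the allocator returns a different region, so
		 * the second attempt cannot hand back the same address. */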
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		if (!txdr->desc) {
			/* Failed allocation, critical failure */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
int
e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;
	uint32_t ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 2:
		tdba = adapter->tx_ring[1].dma;
		tdlen = adapter->tx_ring[1].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
		E1000_WRITE_REG(hw, TDLEN1, tdlen);
		E1000_WRITE_REG(hw, TDH1, 0);
		E1000_WRITE_REG(hw, TDT1, 0);
		adapter->tx_ring[1].tdh = E1000_TDH1;
		adapter->tx_ring[1].tdt = E1000_TDT1;
		/* Fall Through */
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDH, 0);
		E1000_WRITE_REG(hw, TDT, 0);
		adapter->tx_ring[0].tdh = E1000_TDH;
		adapter->tx_ring[0].tdt = E1000_TDT;
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if (hw->media_type == e1000_media_type_fiber ||
	    hw->media_type == e1000_media_type_internal_serdes)
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	E1000_WRITE_REG(hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	E1000_WRITE_REG(hw, TCTL, tctl);

	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= ((1 << 25) | (1 << 21));
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= (1 << 25);
		if (tctl & E1000_TCTL_MULR)
			tarc &= ~(1 << 28);
		else
			tarc |= (1 << 28);
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
			"at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;
	rxdr->rx_skb_prev = NULL;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
int
e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
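/* PAGE_USE_COUNT is a ceiling division by the page size: with 4 KB pages,
 * an MTU of 9000 needs (9000 >> 12) + 1 = 3 pages, while an MTU of
 * exactly 8192 needs only 2. */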
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;
#ifdef CONFIG_E1000_PACKET_SPLIT
	uint32_t pages = 0;
#endif

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (adapter->hw.mac_type > e1000_82543)
		rctl |= E1000_RCTL_SECRC;

	if (adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	if (adapter->hw.mac_type >= e1000_82571) {
		/* We can now specify buffers in 1K increments.
		 * BSIZE and BSEX are ignored in this case. */
		rctl |= adapter->rx_buffer_len << 0x11;
	} else {
		rctl &= ~E1000_RCTL_SZ_4096;
		rctl |= E1000_RCTL_BSEX;
		switch (adapter->rx_buffer_len) {
		case E1000_RXBUFFER_2048:
		default:
			rctl |= E1000_RCTL_SZ_2048;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case E1000_RXBUFFER_4096:
			rctl |= E1000_RCTL_SZ_4096;
			break;
		case E1000_RXBUFFER_8192:
			rctl |= E1000_RCTL_SZ_8192;
			break;
		case E1000_RXBUFFER_16384:
			rctl |= E1000_RCTL_SZ_16384;
			break;
		}
	}

#ifdef CONFIG_E1000_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
	    PAGE_SIZE <= 16384)
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;
#endif
	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
#ifdef CONFIG_E1000_MQ
	uint32_t reta, mrqc;
	int i;
#endif

	if (adapter->rx_ps_pages) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(hw, RCTL);
	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
		if (adapter->itr > 1)
			E1000_WRITE_REG(hw, ITR,
				1000000000 / (adapter->itr * 256));
	}
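	/* Note on the ITR write above: the register holds the minimum
	 * inter-interrupt interval in 256 ns units, while adapter->itr is
	 * an interrupts/sec target, hence 10^9 / (itr * 256); e.g.
	 * itr = 8000 programs ~488 units, a 125 us (1/8000 s) gap. */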
	if (hw->mac_type >= e1000_82571) {
		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
		/* Reset delay timers after every interrupt */
		ctrl_ext |= E1000_CTRL_EXT_CANC;
#ifdef CONFIG_E1000_NAPI
		/* Auto-Mask interrupts upon ICR read. */
		ctrl_ext |= E1000_CTRL_EXT_IAME;
#endif
		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
		E1000_WRITE_REG(hw, IAM, ~0);
		E1000_WRITE_FLUSH(hw);
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
#ifdef CONFIG_E1000_MQ
	case 2:
		rdba = adapter->rx_ring[1].dma;
		E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
		E1000_WRITE_REG(hw, RDLEN1, rdlen);
		E1000_WRITE_REG(hw, RDH1, 0);
		E1000_WRITE_REG(hw, RDT1, 0);
		adapter->rx_ring[1].rdh = E1000_RDH1;
		adapter->rx_ring[1].rdt = E1000_RDT1;
		/* Fall Through */
#endif
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
		E1000_WRITE_REG(hw, RDLEN, rdlen);
		E1000_WRITE_REG(hw, RDH, 0);
		E1000_WRITE_REG(hw, RDT, 0);
		adapter->rx_ring[0].rdh = E1000_RDH;
		adapter->rx_ring[0].rdt = E1000_RDT;
		break;
	}
#ifdef CONFIG_E1000_MQ
	if (adapter->num_rx_queues > 1) {
		uint32_t random[10];

		get_random_bytes(&random[0], 40);

		if (hw->mac_type <= e1000_82572) {
			E1000_WRITE_REG(hw, RSSIR, 0);
			E1000_WRITE_REG(hw, RSSIM, 0);
		}

		switch (adapter->num_rx_queues) {
		case 2:
		default:
			reta = 0x00800080;
			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
			break;
		}

		/* Fill out redirection table */
		for (i = 0; i < 32; i++)
			E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
		E1000_WRITE_REG(hw, MRQC, mrqc);
	}

	/* Multiqueue and packet checksumming are mutually exclusive. */
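	/* (With RSS enabled the hash value is, as we understand it, reported
	 * in the descriptor field that otherwise carries the packet
	 * checksum, which is why PCSD is set below to disable checksum
	 * reporting.) */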
	if (hw->mac_type >= e1000_82571) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		rxcsum |= E1000_RXCSUM_PCSD;
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}
#else

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		if (adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82571 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if ((hw->mac_type >= e1000_82571) &&
			    (adapter->rx_ps_pages)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}
#endif /* CONFIG_E1000_MQ */

	if (hw->mac_type == e1000_82573)
		E1000_WRITE_REG(hw, ERT, 0x0100);

	/* Enable Receives */
	E1000_WRITE_REG(hw, RCTL, rctl);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
				 struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
	}
	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);
	memset(buffer_info, 0, sizeof(struct e1000_buffer));
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
		    struct e1000_tx_ring *tx_ring)
{
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void
e1000_free_rx_resources(struct e1000_adapter *adapter,
			struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void
e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void
e1000_clean_rx_ring(struct e1000_adapter *adapter,
		    struct e1000_rx_ring *rx_ring)
{
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->skb) {
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!ps_page->ps_page[j]) break;
			pci_unmap_page(pdev,
				       ps_page_dma->ps_page_dma[j],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			put_page(ps_page->ps_page[j]);
			ps_page->ps_page[j] = NULL;
		}
	}

	/* there also may be some cached data in our adapter */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);

		/* rx_skb_prev will be wiped out by rx_skb_top */
		rx_ring->rx_skb_top = NULL;
		rx_ring->rx_skb_prev = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
}
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void
e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	e1000_pci_clear_mwi(&adapter->hw);

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl |= E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(&adapter->hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */
	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	/* With 82571 controllers, LAA may be overwritten (with the default)
	 * due to controller reset from the other port. */
	if (adapter->hw.mac_type == e1000_82571) {
		/* activate the work around */
		adapter->hw.laa_is_present = 1;

		/* Hold a copy of the LAA in RAR[14].  This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14] */
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
			      E1000_RAR_ENTRIES - 1);
	}

	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void
e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	uint32_t hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;

	/* reserve RAR[14] for LAA over-write work-around */
	if (adapter->hw.mac_type == e1000_82571)
		rar_entries--;

	/* Check for Promiscuous and All Multicast modes */
	rctl = E1000_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else {
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
	}

	E1000_WRITE_REG(hw, RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */
	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 multicast addresses into the exact filters 1-14;
	 * RAR 0 is used for the station MAC address.
	 * If there are fewer than 14 addresses, go ahead and clear the
	 * remaining filters -- with 82571 controllers only entries 0-13 are
	 * filled here
	 */
	mc_ptr = netdev->mc_list;

	for (i = 1; i < rar_entries; i++) {
		if (mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
			mc_ptr = mc_ptr->next;
		} else {
			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		}
	}

	/* clear the old settings from the multicast hash table */
	for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);

	/* load any remaining addresses into the hash table */
	for (; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
		e1000_mta_set(hw, hash_value);
	}

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void
e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	uint32_t tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((E1000_READ_REG(&adapter->hw, TDT) ==
		     E1000_READ_REG(&adapter->hw, TDH)) &&
		    (E1000_READ_REG(&adapter->hw, TDFT) ==
		     E1000_READ_REG(&adapter->hw, TDFH)) &&
		    (E1000_READ_REG(&adapter->hw, TDFTS) ==
		     E1000_READ_REG(&adapter->hw, TDFHS))) {
			tctl = E1000_READ_REG(&adapter->hw, TCTL);
			E1000_WRITE_REG(&adapter->hw, TCTL,
					tctl & ~E1000_TCTL_EN);
			E1000_WRITE_REG(&adapter->hw, TDFT,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFH,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFTS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFHS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
			E1000_WRITE_FLUSH(&adapter->hw);

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else {
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
}
/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void
e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void
e1000_watchdog_task(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	uint32_t link;

	e1000_check_for_link(&adapter->hw);
	if (adapter->hw.mac_type == e1000_82573) {
		e1000_enable_tx_pkt_filtering(&adapter->hw);
		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
			e1000_update_mng_vlan(adapter);
	}

	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	    !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
		link = !adapter->hw.serdes_link_down;
	else
		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			e1000_get_speed_and_duplex(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex");

			/* tweak tx_queue_len according to speed/duplex */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			if (adapter->link_duplex == HALF_DUPLEX) {
				switch (adapter->link_speed) {
				case SPEED_10:
					netdev->tx_queue_len = 10;
					adapter->tx_timeout_factor = 8;
					break;
				case SPEED_100:
					netdev->tx_queue_len = 100;
					break;
				}
			}

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

	e1000_update_stats(adapter);

	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(&adapter->hw);

#ifdef CONFIG_E1000_MQ
	txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#endif
	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}
	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
		 * asymmetrical Tx or Rx gets ITR=8000; everyone
		 * else is between 2000-8000. */
		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
			adapter->gotcl - adapter->gorcl :
			adapter->gorcl - adapter->gotcl) / 10000;
		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
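		/* e.g. goc = 100 with dif = 50 gives
		 * itr = 50 * 6000 / 100 + 2000 = 5000 ints/sec, halfway
		 * between the symmetric (2000) and fully asymmetric (8000)
		 * endpoints. */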
		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port.  Set the appropriate LAA in RAR[0] */
	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
static inline int
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
	  struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	uint32_t cmd_length = 0;
	uint16_t ipcse = 0, tucse, mss;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_shinfo(skb)->tso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		if (skb->protocol == ntohs(ETH_P_IP)) {
			skb->nh.iph->tot_len = 0;
			skb->nh.iph->check = 0;
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0,
						   IPPROTO_TCP,
						   0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb->h.raw - skb->data - 1;
#ifdef NETIF_F_TSO_IPV6
		} else if (skb->protocol == ntohs(ETH_P_IPV6)) {
			skb->nh.ipv6h->payload_len = 0;
			skb->h.th->check =
				~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
						 &skb->nh.ipv6h->daddr,
						 0,
						 IPPROTO_TCP,
						 0);
			ipcse = 0;
#endif
		}
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;

		if (++i == tx_ring->count) i = 0;
		tx_ring->next_to_use = i;

		return TRUE;
	}
#endif

	return FALSE;
}
static inline boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
	      struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	uint8_t css;

	if (likely(skb->ip_summed == CHECKSUM_HW)) {
		css = skb->h.raw - skb->data;

		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		buffer_info->time_stamp = jiffies;

		if (unlikely(++i == tx_ring->count)) i = 0;
		tx_ring->next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
static inline int
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
	     struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
	     unsigned int nr_frags, unsigned int mss)
{
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_shinfo(skb)->tso_size) {
			tx_ring->last_tx_tso = 0;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
#endif
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) &&
			    size == len && size > 8))
				size -= 4;
#endif
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			if (unlikely(adapter->pcix_82544 &&
			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
			   size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if (unlikely(++i == tx_ring->count)) i = 0;
		}
	}
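	/* Hang the skb on the last buffer and record that last index in the
	 * first descriptor's next_to_watch: the Tx cleanup path treats the
	 * packet as complete only once the descriptor at next_to_watch has
	 * been written back, so the skb is freed only after all of its
	 * fragments have been DMA'd. */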
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
static inline void
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
	       int tx_flags, int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
}
/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 **/
#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0
static inline int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;

	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
#define MINIMUM_DHCP_PACKET_SIZE 282
static inline int
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	uint16_t length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		      (adapter->hw.mng_cookie.status &
		       E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
			return 0;
	}
	if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
		struct ethhdr *eth = (struct ethhdr *) skb->data;
		if ((htons(ETH_P_IP) == eth->h_proto)) {
			const struct iphdr *ip =
				(struct iphdr *)((uint8_t *)skb->data+14);
			if (IPPROTO_UDP == ip->protocol) {
				struct udphdr *udp =
					(struct udphdr *)((uint8_t *)ip +
						(ip->ihl << 2));
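				/* UDP destination port 67 is the BOOTP/DHCP
				 * server port */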
				if (ntohs(udp->dest) == 67) {
					offset = (uint8_t *)udp + 8 - skb->data;
					length = skb->len - offset;

					return e1000_mng_write_dhcp_info(hw,
							(uint8_t *)udp + 8,
							length);
				}
			}
		}
	}
	return 0;
}
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
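/* Worst-case descriptor count for a buffer of S bytes when one descriptor
 * carries at most 2^X bytes.  The +1 over-counts by design; for example,
 * TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 3 descriptors. */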
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	int count = 0;
	int tso;
	unsigned int f;
	len -= skb->data_len;

#ifdef CONFIG_E1000_MQ
	tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#else
	tx_ring = adapter->tx_ring;
#endif

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

#ifdef NETIF_F_TSO
	mss = skb_shinfo(skb)->tso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		uint8_t hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;
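		/* e.g. a typical 1448-byte mss gives min(5792, 4096) = 4096,
		 * leaving both limits unchanged; only an mss below 1024
		 * actually shrinks max_per_txd (and thus max_txd_pwr). */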
  2420. /* TSO Workaround for 82571/2 Controllers -- if skb->data
  2421. * points to just header, pull a few bytes of payload from
  2422. * frags into skb->data */
  2423. hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  2424. if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
  2425. (adapter->hw.mac_type == e1000_82571 ||
  2426. adapter->hw.mac_type == e1000_82572)) {
  2427. unsigned int pull_size;
  2428. pull_size = min((unsigned int)4, skb->data_len);
  2429. if (!__pskb_pull_tail(skb, pull_size)) {
  2430. printk(KERN_ERR "__pskb_pull_tail failed.\n");
  2431. dev_kfree_skb_any(skb);
  2432. return -EFAULT;
  2433. }
			len = skb->len - skb->data_len;
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_HW))
		count++;
	count++;
#else
	if (skb->ip_summed == CHECKSUM_HW)
		count++;
#endif

#ifdef NETIF_F_TSO
	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso &&
	    !skb_shinfo(skb)->tso_size)
		count++;
#endif

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
	    (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573))
		e1000_transfer_dhcp_info(adapter, skb);

	local_irq_save(flags);
	if (!spin_trylock(&tx_ring->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(adapter->hw.mac_type == e1000_82547)) {
		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
			netif_stop_queue(netdev);
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		tx_ring->last_tx_tso = 1;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;
	/* The old code assumed an IPv4 packet by default whenever TSO was
	 * enabled, but 82571 hardware can do TSO on IPv6 as well, so check
	 * the protocol explicitly instead of assuming. */
	if (likely(skb->protocol == ntohs(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	e1000_tx_queue(adapter, tx_ring, tx_flags,
		       e1000_tx_map(adapter, tx_ring, skb, first,
				    max_per_txd, nr_frags, mss));

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	return NETDEV_TX_OK;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void
e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
e1000_tx_timeout_task(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	e1000_down(adapter);
	e1000_up(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82573:
		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	case e1000_82571:
	case e1000_82572:
#define MAX_STD_JUMBO_FRAME_SIZE 9234
		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* since the driver code now supports splitting a packet across
	 * multiple descriptors, most of the fifo related limitations on
	 * jumbo frame traffic have gone away.
	 * simply use 2k descriptors for everything.
	 *
	 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size
	 * i.e. RXBUFFER_2048 --> size-4096 slab */

	/* recent hardware supports 1KB granularity */
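	/* e.g. a standard 1500-byte MTU gives max_frame = 1518, which the
	 * code below first caps at E1000_RXBUFFER_2048 and then rounds up
	 * to the next 1 KB boundary, yielding 2048-byte receive buffers;
	 * anything larger relies on splitting across descriptors. */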
	if (adapter->hw.mac_type > e1000_82547_rev_2) {
		adapter->rx_buffer_len =
			((max_frame < E1000_RXBUFFER_2048) ?
			 max_frame : E1000_RXBUFFER_2048);
		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
	} else
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	adapter->hw.max_frame_size = max_frame;

	return 0;
}

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void
e1000_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	uint16_t phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	spin_lock_irqsave(&adapter->stats_lock, flags);
	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */
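	/* The hardware statistics registers are read-to-clear, so each
	 * E1000_READ_REG below returns the delta since the previous update
	 * and is accumulated into the software counters. */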
	adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
	adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
	adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
	adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
	adapter->stats.roc += E1000_READ_REG(hw, ROC);
	adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
	adapter->stats.mpc += E1000_READ_REG(hw, MPC);
	adapter->stats.scc += E1000_READ_REG(hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
	adapter->stats.mcc += E1000_READ_REG(hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
	adapter->stats.dc += E1000_READ_REG(hw, DC);
	adapter->stats.sec += E1000_READ_REG(hw, SEC);
	adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
	adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
	adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
	adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(hw, RFC);
	adapter->stats.rjc += E1000_READ_REG(hw, RJC);
	adapter->stats.torl += E1000_READ_REG(hw, TORL);
	adapter->stats.torh += E1000_READ_REG(hw, TORH);
	adapter->stats.totl += E1000_READ_REG(hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(hw, TOTH);
	adapter->stats.tpr += E1000_READ_REG(hw, TPR);
	adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

	/* used for adaptive IFS */
	hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = E1000_READ_REG(hw, COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
		adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
		adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
		adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
	}
	if (hw->mac_type > e1000_82547_rev_2) {
		adapter->stats.iac += E1000_READ_REG(hw, IAC);
		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
		adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
		adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
		adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
		adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
		adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
		adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
	}

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprc;
	adapter->net_stats.tx_packets = adapter->stats.gptc;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.rlec + adapter->stats.cexterr;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
		adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

#ifdef CONFIG_E1000_MQ
void
e1000_rx_schedule(void *data)
{
	struct net_device *poll_dev, *netdev = data;
	struct e1000_adapter *adapter = netdev->priv;
	int this_cpu = get_cpu();

	poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
	if (poll_dev == NULL) {
		put_cpu();
		return;
	}

	if (likely(netif_rx_schedule_prep(poll_dev)))
		__netif_rx_schedule(poll_dev);
	else
		e1000_irq_enable(adapter);

	put_cpu();
}
#endif

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/
static irqreturn_t
e1000_intr(int irq, void *data, struct pt_regs *regs)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint32_t icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
	int i;
#else
	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked. No need for the
	 * IMC write, but it does mean we should
	 * account for it ASAP. */
	if (likely(hw->mac_type >= e1000_82571))
		atomic_inc(&adapter->irq_sem);
#endif

	if (unlikely(!icr)) {
#ifdef CONFIG_E1000_NAPI
		if (hw->mac_type >= e1000_82571)
			e1000_irq_enable(adapter);
#endif
		return IRQ_NONE; /* Not our interrupt */
	}

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_E1000_NAPI
	if (unlikely(hw->mac_type < e1000_82571)) {
		atomic_inc(&adapter->irq_sem);
		E1000_WRITE_REG(hw, IMC, ~0);
		E1000_WRITE_FLUSH(hw);
	}
#ifdef CONFIG_E1000_MQ
	if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
		/* We must setup the cpumask once count == 0 since
		 * each cpu bit is cleared when the work is done. */
		adapter->rx_sched_call_data.cpumask = adapter->cpumask;
		atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
		atomic_set(&adapter->rx_sched_call_data.count,
			   adapter->num_rx_queues);
		smp_call_async_mask(&adapter->rx_sched_call_data);
	} else {
		printk(KERN_DEBUG "call_data.count == %u\n",
		       atomic_read(&adapter->rx_sched_call_data.count));
	}
#else /* if !CONFIG_E1000_MQ */
	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
		__netif_rx_schedule(&adapter->polling_netdev[0]);
	else
		e1000_irq_enable(adapter);
#endif /* CONFIG_E1000_MQ */

#else /* if !CONFIG_E1000_NAPI */
	/* Writing IMC and IMS is needed for 82547.
	 * Due to Hub Link bus being occupied, an interrupt
	 * de-assertion message is not able to be sent.
	 * When an interrupt assertion message is generated later,
	 * two messages are re-ordered and sent out.
	 * That causes APIC to think 82547 is in de-assertion
	 * state, while 82547 is in assertion state, resulting
	 * in dead lock. Writing IMC forces 82547 into
	 * de-assertion state.
	 */
	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
		atomic_inc(&adapter->irq_sem);
		E1000_WRITE_REG(hw, IMC, ~0);
	}
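	/* Note the deliberate non-short-circuit '&' below: both the Rx and
	 * Tx clean routines are meant to run on every iteration, so their
	 * results are combined with a bitwise AND rather than '&&'. */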
	for (i = 0; i < E1000_MAX_INTR; i++)
		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
			     !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
			break;

	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
		e1000_irq_enable(adapter);
#endif /* CONFIG_E1000_NAPI */

	return IRQ_HANDLED;
}

#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int
e1000_clean(struct net_device *poll_dev, int *budget)
{
	struct e1000_adapter *adapter;
	int work_to_do = min(*budget, poll_dev->quota);
	int tx_cleaned = 0, i = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(adapter->netdev))
		goto quit_polling;

	while (poll_dev != &adapter->polling_netdev[i]) {
		i++;
		if (unlikely(i == adapter->num_rx_queues))
			BUG();
	}

	if (likely(adapter->num_tx_queues == 1)) {
		/* e1000_clean is called per-cpu. This lock protects
		 * tx_ring[0] from being cleaned by multiple cpus
		 * simultaneously. A failure obtaining the lock means
		 * tx_ring[0] is currently being cleaned anyway. */
		if (spin_trylock(&adapter->tx_queue_lock)) {
			tx_cleaned = e1000_clean_tx_irq(adapter,
							&adapter->tx_ring[0]);
			spin_unlock(&adapter->tx_queue_lock);
		}
	} else
		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);

	adapter->clean_rx(adapter, &adapter->rx_ring[i],
			  &work_done, work_to_do);

	*budget -= work_done;
	poll_dev->quota -= work_done;

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((!tx_cleaned && (work_done == 0)) ||
	    !netif_running(adapter->netdev)) {
quit_polling:
		netif_rx_complete(poll_dev);
		e1000_irq_enable(adapter);
		return 0;
	}

	return 1;
}
#endif

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter,
		   struct e1000_tx_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = FALSE; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
#ifdef CONFIG_E1000_MQ
			tx_ring->tx_stats.bytes += buffer_info->length;
#endif
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));

			if (unlikely(++i == tx_ring->count)) i = 0;
		}
#ifdef CONFIG_E1000_MQ
		tx_ring->tx_stats.packets++;
#endif

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	spin_lock(&tx_ring->tx_lock);

	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
		     netif_carrier_ok(netdev)))
		netif_wake_queue(netdev);

	spin_unlock(&tx_ring->tx_lock);

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       adapter->tx_timeout_factor * HZ)
		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
			 E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				"  Tx Queue             <%lu>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  next_to_watch.status <%x>\n",
				/* pointer subtraction already yields an
				 * element index; no sizeof() scaling needed */
				(unsigned long)(tx_ring - adapter->tx_ring),
				readl(adapter->hw.hw_addr + tx_ring->tdh),
				readl(adapter->hw.hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			netif_stop_queue(netdev);
		}
	}
	return cleaned;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static inline void
e1000_rx_checksum(struct e1000_adapter *adapter,
		  uint32_t status_err, uint32_t csum,
		  struct sk_buff *skb)
{
	uint16_t status = (uint16_t)status_err;
	uint8_t errors = (uint8_t)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* 82543 or newer only */
	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
		if (!(status & E1000_RXD_STAT_TCPCS))
			return;
	} else {
		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
			return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_HW;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 **/
static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq(struct e1000_adapter *adapter,
		   struct e1000_rx_ring *rx_ring,
		   int *work_done, int work_to_do)
#else
e1000_clean_rx_irq(struct e1000_adapter *adapter,
		   struct e1000_rx_ring *rx_ring)
#endif
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned long flags;
	uint32_t length;
	uint8_t last_byte;
	unsigned int i;
	int cleaned_count = 0;
	boolean_t cleaned = FALSE, multi_descriptor = FALSE;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		u8 status;

		buffer_info = &rx_ring->buffer_info[i];
#ifdef CONFIG_E1000_NAPI
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
#endif
		status = rx_desc->status;
		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		skb = buffer_info->skb;
		/* the skb now either goes up the stack or is recycled by
		 * the copybreak path below, so drop the ring's reference */
		buffer_info->skb = NULL;
		length = le16_to_cpu(rx_desc->length);

		if (unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
			/* All receives must fit into a single buffer */
			E1000_DBG("%s: Receive packet consumed multiple"
				  " buffers\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}
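		/* TBI workaround: on fiber links using TBI, a frame ending
		 * in a carrier-extension symbol is flagged as an error even
		 * though it is otherwise valid. TBI_ACCEPT recognizes that
		 * case, the statistics are corrected, and the trailing
		 * symbol byte is dropped (length--). */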
		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(&adapter->hw, rx_desc->status,
				       rx_desc->errors, length, last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(&adapter->hw,
						       &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				dev_kfree_skb_irq(skb);
				goto next_desc;
			}
		}

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
#define E1000_CB_LENGTH 256
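		/* Copybreak trades one memcpy for faster buffer recycling:
		 * e.g. a 64-byte TCP ACK is copied into a fresh 64-byte
		 * (plus NET_IP_ALIGN) skb so the original full-size receive
		 * buffer can be reused immediately instead of traveling up
		 * the stack. */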
		if ((length < E1000_CB_LENGTH) &&
		    !rx_ring->rx_skb_top &&
		    /* or maybe (status & E1000_RXD_STAT_EOP) && */
		    !multi_descriptor) {
			struct sk_buff *new_skb =
				dev_alloc_skb(length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				new_skb->dev = netdev;
				memcpy(new_skb->data - NET_IP_ALIGN,
				       skb->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
		}
		/* end copybreak code */

		/* Good Receive: set the final length on whichever skb is
		 * handed up (a recycled skb is reset with skb_trim() when
		 * it is re-posted to the ring) */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (uint32_t)(status) |
				  ((uint32_t)(rx_desc->errors) << 24),
				  rx_desc->csum, skb);
		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp &&
			     (status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special) &
						 E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp &&
			     (rx_desc->status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special) &
					E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;
#ifdef CONFIG_E1000_MQ
		rx_ring->rx_stats.packets++;
		rx_ring->rx_stats.bytes += length;
#endif
next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* advance to the next descriptor; without this the loop
		 * would process the same entry forever */
		if (unlikely(++i == rx_ring->count)) i = 0;
		rx_desc = E1000_RX_DESC(*rx_ring, i);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	return cleaned;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 **/
static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
		      struct e1000_rx_ring *rx_ring,
		      int *work_done, int work_to_do)
#else
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
		      struct e1000_rx_ring *rx_ring)
#endif
{
	union e1000_rx_desc_packet_split *rx_desc;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;
	uint32_t length, staterr;
	int cleaned_count = 0;
	boolean_t cleaned = FALSE;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
		if (unlikely(*work_done >= work_to_do))
			break;
		(*work_done)++;
#endif
		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		skb = buffer_info->skb;

		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
			E1000_DBG("%s: Packet Split buffers didn't pick up"
				  " the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (unlikely(!length)) {
			E1000_DBG("%s: Last part of the packet spanning"
				  " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
				break;

			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			skb_shinfo(skb)->frags[j].page =
				ps_page->ps_page[j];
			ps_page->ps_page[j] = NULL;
			skb_shinfo(skb)->frags[j].page_offset = 0;
			skb_shinfo(skb)->frags[j].size = length;
			skb_shinfo(skb)->nr_frags++;
			skb->len += length;
			skb->data_len += length;
		}

		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (likely(rx_desc->wb.upper.header_status &
			   E1000_RXDPS_HDRSTAT_HDRSP)) {
			adapter->rx_hdr_split++;
#ifdef HAVE_RX_ZERO_COPY
			skb_shinfo(skb)->zero_copy = TRUE;
#endif
		}
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->wb.middle.vlan) &
						 E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->wb.middle.vlan) &
					E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;
#ifdef CONFIG_E1000_MQ
		rx_ring->rx_stats.packets++;
		rx_ring->rx_stats.bytes += length;
#endif
next_desc:
		rx_desc->wb.middle.status_error &= ~0xFF;
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* advance to the next descriptor; without this the loop
		 * would process the same entry forever */
		if (unlikely(++i == rx_ring->count)) i = 0;
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	return cleaned;
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
		       struct e1000_rx_ring *rx_ring,
		       int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		if (!(skb = buffer_info->skb))
			skb = dev_alloc_skb(bufsz);
		else {
			skb_trim(skb, 0);
			goto map_skb;
		}

		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
					     "at %p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = dev_alloc_skb(bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while !buffer_info->skb */
			} else {
				/* Use new allocation */
				dev_kfree_skb(oldskb);
			}
		}
		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			DPRINTK(RX_ERR, ERR,
				"dma align check failed: %u bytes at %p\n",
				adapter->rx_buffer_len,
				(void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
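		/* The tail register is only written once every
		 * E1000_RX_BUFFER_WRITE descriptors (the masking test below
		 * is true when i is a multiple of that batch size), which
		 * amortizes the cost of the MMIO write. */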
		if (unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();
			writel(i, adapter->hw.hw_addr + rx_ring->rdt);
		}

		if (unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void
e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
			  struct e1000_rx_ring *rx_ring,
			  int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	ps_page = &rx_ring->ps_page[i];
	ps_page_dma = &rx_ring->ps_page_dma[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			if (j < adapter->rx_ps_pages) {
				if (likely(!ps_page->ps_page[j])) {
					ps_page->ps_page[j] =
						alloc_page(GFP_ATOMIC);
					if (unlikely(!ps_page->ps_page[j]))
						goto no_buffers;
					ps_page_dma->ps_page_dma[j] =
						pci_map_page(pdev,
							     ps_page->ps_page[j],
							     0, PAGE_SIZE,
							     PCI_DMA_FROMDEVICE);
				}
				/* Refresh the desc even if buffer_addrs didn't
				 * change because each write-back erases
				 * this info.
				 */
				rx_desc->read.buffer_addr[j+1] =
					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
			} else
				rx_desc->read.buffer_addr[j+1] = ~0;
		}

		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (unlikely(!skb))
			break;

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_ps_bsize0;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();
			/* Hardware increments by 16 bytes, but packet split
			 * descriptors are 32 bytes...so we increment tail
			 * twice as much.
			 */
			writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
		}

		if (unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;

	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
			    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
		    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl - handle device ioctl calls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl - MII PHY register access
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;

	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if (adapter->hw.phy_type == e1000_phy_m88) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
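					/* Decode the forced-mode bits of the
					 * standard MII control register:
					 * bit 6 (0x40) and bit 13 (0x2000)
					 * select the speed (1000/100/10) and
					 * bit 8 (0x100) selects full duplex. */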
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					/* use the ethtool DUPLEX_* values that
					 * e1000_set_spd_dplx() matches against
					 * (not the hw-layer FULL_DUPLEX and
					 * HALF_DUPLEX constants) */
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if (netif_running(adapter->netdev)) {
					e1000_down(adapter);
					e1000_up(adapter);
				} else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev)) {
					e1000_down(adapter);
					e1000_up(adapter);
				} else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
	return inl(port);
}

void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}

static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* disable VLAN filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl &= ~E1000_RCTL_VFE;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	e1000_irq_enable(adapter);
}

static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
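	/* The VLAN filter table (VFTA) is a 4096-bit bitmap spread over
	 * 128 32-bit registers: vid >> 5 selects the register and
	 * vid & 0x1F the bit within it. For example, vid 100 sets bit 4
	 * of register 3. */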
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);

	if (adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		uint16_t vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!adapter->vlgrp->vlan_devices[vid])
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;
	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_PM
static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, manc, status;
	uint32_t wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000_down(adapter);

	status = E1000_READ_REG(&adapter->hw, STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (adapter->wol & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if (adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}

		if (adapter->hw.media_type == e1000_media_type_fiber ||
		    adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		retval = pci_enable_wake(pdev, PCI_D3hot, 1);
		if (retval)
			DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
		retval = pci_enable_wake(pdev, PCI_D3cold, 1);
		if (retval)
			DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		retval = pci_enable_wake(pdev, PCI_D3hot, 0);
		if (retval)
			DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
		retval = pci_enable_wake(pdev, PCI_D3cold, 0);
		if (retval)
			DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
	}
	pci_save_state(pdev);

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
			retval = pci_enable_wake(pdev, PCI_D3hot, 1);
			if (retval)
				DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
			retval = pci_enable_wake(pdev, PCI_D3cold, 1);
			if (retval)
				DPRINTK(PROBE, ERR,
					"Error enabling D3 cold wake\n");
		}
	}

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (retval)
		DPRINTK(PROBE, ERR, "Error in setting power state\n");

	return 0;
}

static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int retval;
	uint32_t manc, ret_val;

	retval = pci_set_power_state(pdev, PCI_D0);
	if (retval)
		DPRINTK(PROBE, ERR, "Error in setting power state\n");
	ret_val = pci_enable_device(pdev);
	pci_set_master(pdev);
	retval = pci_enable_wake(pdev, PCI_D3hot, 0);
	if (retval)
		DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
	retval = pci_enable_wake(pdev, PCI_D3cold, 0);
	if (retval)
		DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up. For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev, NULL);
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
#ifndef CONFIG_E1000_NAPI
	adapter->clean_rx(adapter, adapter->rx_ring);
#endif
	enable_irq(adapter->pdev->irq);
}
#endif

/* e1000_main.c */