qlge_main.c

/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
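
/* Note: ql_sem_trylock() returns 0 when the semaphore was acquired and
 * nonzero when another function already owns it.  ql_sem_spinlock() below
 * retries the acquire up to 30 times with a 100 us delay between attempts,
 * i.e. for roughly 3 ms, before giving up with -ETIMEDOUT.
 */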
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* don't leak the DMA mapping */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
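/* An entry is read back as 32-bit words: the lower 32 bits of the address,
 * then the upper 16 bits, and (for CAM entries only) a third word holding
 * the output/routing value that was programmed alongside the address.
 */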
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR:	/* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_IP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR:	/* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}
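	/* Note: even the "clear E-bit" case (mask == 0) builds a nonzero
	 * value above, so the write below still executes; with enable == 0
	 * it programs the slot with the E-bit clear and zeroes RT_DATA.
	 */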
	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
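
/* Pairs with ql_enable_completion_interrupt() above: each disable call
 * bumps irq_cnt, and the interrupt is only re-armed once a matching
 * number of enable calls has brought the count back to zero.
 */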
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
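
/* Sanity-check a flash image: the first four bytes must match the expected
 * signature string, and the 16-bit words of the image must sum to zero
 * (the stored checksum word makes the total wrap to 0 when the image is
 * valid).
 */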
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);
exit:
	return status;
}
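
/* Note: the low word (reg) and high word (reg + 4) are fetched with two
 * separate indirect reads, so unless the hardware latches the pair, the
 * 64-bit result is not an atomic snapshot of a still-incrementing counter.
 */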
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;

	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;

	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
	    == ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
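
/* Large receive buffers are carved out of a "master" compound page of
 * ql_lbq_block_size() bytes: the first chunk allocates and maps the page,
 * each subsequent chunk takes a page reference via get_page(), and the
 * master page pointer is dropped once the last chunk has been handed out
 * (the unmap happens in ql_get_curr_lchunk() when that chunk is consumed).
 */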
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;

		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i = %d, clean_idx = %d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
			      lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

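/* Note (a reading of the code, not a verified statement): both refill paths
 * above are normally driven from ql_clean_inbound_rx_ring() in NAPI context
 * (see below), so they appear to rely on NAPI serialization rather than a
 * lock, and they treat allocation failure as benign -- the queue is simply
 * left short and topped up again on a later poll.
 */
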
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen),
				       PCI_DMA_TODEVICE);
		}
	}
}

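/* Sketch of the mapping layout ql_unmap_send() assumes, derived from
 * ql_map_send() below: map[0] is always the pci_map_single() of skb->data;
 * map[1..6] hold up to six fragments; when more than seven mappings exist,
 * map[7] is the pci_map_single() of the OAL itself and map[8..] are the
 * remaining fragments.  That is why only slots 0 and 7 are undone with
 * pci_unmap_single() and everything else with pci_unmap_page().
 */
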
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
				cpu_to_le32((sizeof(struct tx_buf_desc) *
					     (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

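/* Worked example for ql_map_send() (illustrative only): for an skb with 10
 * fragments, map_idx visits 0 (skb->data), 1..6 (frags 0..5), 7 (the OAL
 * mapping), then 8..11 (frags 6..9, whose descriptors live in
 * oal->seg[0..3]).  map_cnt ends up at 12, which is exactly the count that
 * ql_unmap_send() later replays on completion or on mapping failure.
 */
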
/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		/* fall through */
	default:
		break;
	}
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.  Note: the received length
	 * is checked here; the skb is still empty at this point.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}

	/* The Ethernet header is copied into the skb's linear area;
	 * the payload page is chained on as a fragment.
	 */
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + ETH_HLEN);

			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb, 32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

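/* Note (assumed values, for illustration): with QLGE_SB_PAD = 32, matching
 * the skb_reserve() in ql_update_sbq() above, and NET_IP_ALIGN typically 2,
 * the realign slides skb->data back by 30 bytes and copies the received
 * bytes down, leaving the IP header 2-byte aligned for the stack.
 */
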
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer.
			 * We copy it to a new skb and let it go.  This can
			 * happen with jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG,
					     qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *	   buffer will hold 32 of them. The thing is,
		 *	   we'll use 3 max for our 9000 byte jumbo
		 *	   frames. If the MTU goes up we could
		 *	   eventually be in trouble.
		 */
		int size, i = 0;

		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

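/* Summary of the cases ql_build_rx_skb() handles, keyed off the IOCB flags
 * (a reading of the code above, for orientation): DS set -> data in one
 * small buffer, either memcpy'd onto the header skb (HS) or used as the skb
 * itself; DL set -> data in one large-buffer chunk, page-chained to the
 * header skb (HS) or to a fresh skb whose header is then pulled linear;
 * neither -> data spans several large buffers whose addresses arrive in a
 * small-buffer sglist.
 */
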
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk.  Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate to the reset process
	 * that we are in fatal error recovery rather than a
	 * normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

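/* Note: the wq_len / 4 test above implements the "wake at 25% free"
 * heuristic.  For illustration (the actual ring length is configuration
 * dependent), with an assumed wq_len of 256 the stopped subqueue restarts
 * once tx_count shows more than 64 IOCB slots free again.
 */
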
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

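/* Note on the poll above: only the inbound (RSS) ring is charged against
 * the NAPI budget; TX completion rings are drained without counting.
 * Finishing under budget and then calling napi_complete() plus
 * ql_enable_completion_interrupt() is the standard "no more work, re-arm
 * the IRQ" handshake.
 */
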
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;

	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev,
			   "Resetting chip. Error Status Register = 0x%x\n",
			   var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

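/* Note (a reading of the code, not a verified statement): the irq_cnt test
 * at the top of qlge_isr() treats a nonzero disable count as "this vector
 * is already being handled or masked", so on a shared INTx line the handler
 * returns IRQ_NONE and lets the other devices' handlers on the line run.
 */
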
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

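/* Note: the ~csum_tcpudp_magic()/~csum_ipv6_magic() seeding above follows
 * the usual TSO convention -- software writes the inverted pseudo-header
 * checksum (addresses and protocol, with a zero length) into tcp->check,
 * so the hardware can fold in each segment's payload length and data
 * checksum as it carves the super-frame into MSS-sized segments.
 */
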
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb)
			    << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32)skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

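/* Note: the wmb() before ql_write_db_reg() in qlge_send() orders the IOCB
 * and OAL writes ahead of the producer-index doorbell, so the chip can
 * never fetch a half-written request.  The tx_count < 2 stop/wake dance at
 * the end mirrors the 25%-free wake logic in ql_clean_outbound_rx_ring().
 */
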
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE,
				     &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);

	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;

	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc),
			GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
	if (rx_ring->pg_chunk.page) {
		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
		put_page(rx_ring->pg_chunk.page);
		rx_ring->pg_chunk.page = NULL;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}


/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
		pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				     &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					     &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->sbq == NULL)
			goto err_mem;

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					     &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->lbq == NULL)
			goto err_mem;

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
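
/* Reclaim any skbs still sitting in the TX rings on ifdown.  A send
 * that never saw its completion would otherwise leak its DMA mappings
 * and its skb.
 */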
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}
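
/* Allocate the shadow register space plus every rx and tx ring's
 * resources.  On any failure, everything allocated so far is freed.
 */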
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}

	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}

	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
		FLAGS_LV |		/* Load MSI-X vector */
		FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
			cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
			cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
			cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
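
/* Set up the tx ring control block (wqicb) and download it to
 * the chip.
 */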
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);
	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}
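
/* Undo whichever interrupt mode ql_enable_msix() settled on, freeing
 * the MSI-X entry table if one was allocated.
 */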
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count.  If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
					      qdev->msi_x_entry,
					      qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings.  This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
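/* For example, with 2 vectors and 8 TX completion rings
 * (tx_rings_per_vector = 4), vector 0 masks in its RSS ring (cq_id 0)
 * plus TX completion cq_ids 2-5, while vector 1 masks in cq_id 1 plus
 * cq_ids 6-9.
 */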
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
				(1 << qdev->rx_ring[qdev->rss_ring_count +
				 (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
				INTR_EN_IHD | i;
			intr_context->intr_dis_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
				INTR_EN_IHD | i;
			intr_context->intr_read_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK |
				INTR_EN_IHD | i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services.  In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}
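
/* Free every hooked vector, then back out of MSI-X/MSI mode. */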
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}
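
/* Hook our interrupt handlers: one handler per vector under MSI-X, or
 * a single MSI/shared legacy handler otherwise.  The dev_id passed to
 * request_irq() is the rx_ring the vector services.
 */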
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status = request_irq(pdev->irq, qlge_isr,
					     test_bit(QL_MSI_ENABLED,
						      &qdev->flags)
					     ? 0 : IRQF_SHARED,
					     intr_context->name,
					     &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
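
/* Build and download the ricb that configures RSS: hash keys,
 * indirection table, and hash-type flags.
 */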
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}
	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
		FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox commands and
	 * just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs.  We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */
	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}
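
/* Quiesce the device: stop NAPI and interrupts, drain the tx rings,
 * reset the chip, and free the rx buffers.
 */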
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from a common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);
	return status;
}
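
/* Bring the device up: initialize the chip, post rx buffers, restore
 * rx mode and vlan settings, and enable interrupts.
 */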
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);

	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
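
/* Size and type every ring: one RSS (inbound) ring per interrupt
 * vector, one tx ring per CPU, and one outbound completion ring for
 * each tx ring.
 */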
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
			tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
				rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
				rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;

		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
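
/* Program the routing registers to mirror the net_device flags and
 * multicast list: promiscuous, all-multi, and multicast-match modes.
 */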
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
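
/* Read the function number from the chip and derive the port,
 * semaphore masks, mailbox addresses, and nic_ops for this function.
 */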
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* The regions were never requested, so only
		 * disable the device on this error path.
		 */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
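
/* Periodically read a register so EEH notices if the PCI bus has gone
 * dead; rearm every 5 seconds while the channel is healthy.
 */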
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			    NETIF_F_TSO | NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->vlan_features = ndev->hw_features;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
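
/*
 * Thin wrappers around the normal transmit and receive-clean paths,
 * used by the ethtool loopback self-test.
 */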
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
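
/*
 * Inverse of qlge_probe(): kill the EEH heartbeat and any pending
 * work items before unhooking from the stack and releasing the
 * PCI resources.
 */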
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
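
/*
 * Final stage of EEH recovery: bring the interface back up if it was
 * running when the error hit, then re-arm the heartbeat timer and
 * re-attach the device.
 */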
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
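
/*
 * EEH recovery sequence as driven by the PCI core:
 * error_detected() -> slot_reset() -> resume().
 */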
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
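
/*
 * Legacy PCI power-management entry points. qlge_suspend() is built
 * unconditionally because qlge_shutdown() reuses it; qlge_resume()
 * is only needed when CONFIG_PM is set.
 */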
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);
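
/*
 * Usage sketch, assuming the module parameters keep the names of the
 * qlge_mpi_coredump / qlge_force_coredump variables tested above:
 *
 *	# allocate the MPI coredump buffer at probe time and force
 *	# core dumps on firmware resets
 *	modprobe qlge qlge_mpi_coredump=1 qlge_force_coredump=1
 */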