/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	/* NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
	/* NETIF_MSG_TX_QUEUED | */
	/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
	/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
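
/* Returns 0 once the semaphore is acquired.  ql_sem_trylock() reads SEM
 * back and returns zero only when the requested bit actually stuck, so
 * we retry for up to 30 * 100us before giving up.
 */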
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used by kernel-thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
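
/* A CAM MAC entry spans three data words: the lower and upper halves of
 * the 48-bit address plus an output/routing word, which is why the
 * MAC_ADDR_TYPE_CAM_MAC case above reads one more word than a plain
 * multicast entry.  The setter below writes the same three words.
 */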
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
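
/* Note on the register convention used below: for INTR_EN (and other
 * paired registers in this chip, such as STS and CFG above) the upper
 * 16 bits written act as a mask selecting which of the lower 16 data
 * bits take effect.  Writing (INTR_EN_EI << 16) | INTR_EN_EI sets the
 * enable bit; writing only (INTR_EN_EI << 16) clears it.
 */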
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're running with multiple MSI-X
		 * interrupts and it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're running with multiple MSI-X
	 * interrupts and it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
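
/* A valid flash image begins with the expected 4-byte signature and its
 * 16-bit words sum to zero, so any non-zero return from
 * ql_validate_flash() means the image is bad.  The words themselves are
 * fetched one at a time through the FLASH_ADDR/FLASH_DATA indirect
 * register pair below.
 */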
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;

	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;

	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);

	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;

	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;

	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);
exit:
	return status;
}
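
/* Unlike the 8012 path that follows, 8000-series port setup is handled
 * by the MPI firmware; the driver only fetches the firmware version and
 * state for the banner/ethtool, then queues mpi_port_cfg_work to get/set
 * the TX/RX frame sizes.
 */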
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;

	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
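
/* Large buffers are carved out of one "master" page of
 * PAGE_SIZE << lbq_buf_order bytes.  Every chunk except the last takes
 * an extra page reference via get_page() above, so each buffer handed
 * to the hardware holds its own reference; the chunk flagged with
 * last_flag consumes the original reference and triggers the unmap in
 * ql_get_curr_lchunk().
 */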

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}
			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);
			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
					netdev_alloc_skb(qdev->ndev,
							 SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
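
/* Buffer-queue refill runs in two contexts: at ifup time via
 * ql_alloc_rx_buffers() to prime the rings, and from the NAPI receive
 * path via ql_clean_inbound_rx_ring() as completions consume buffers.
 */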

/* Unmaps tx buffers. Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}
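
/* The tx_ring_desc->map[] array mirrors the layout that ql_map_send()
 * builds below: map[0] is always the skb->data linear area, map[1..6]
 * are page frags, and when more than 6 frags exist map[7] is the OAL
 * itself (mapped with pci_map_single) followed by the remaining frags.
 */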

/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
				cpu_to_le32((sizeof(struct tx_buf_desc) *
					     (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = pci_map_page(qdev->pdev, frag->page,
				   frag->page_offset, frag->size,
				   PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be 1
	 * and only the skb->data area gets unmapped. Otherwise we
	 * pass in the number of segments that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
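
/* Note that mapping failures surface to qlge_send() as NETDEV_TX_BUSY,
 * which bumps tx_errors and hands the skb back to the stack for
 * requeue rather than freeing it.
 */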

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}
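
/* napi_gro_frags() passes ownership of the frag-only skb to the GRO
 * layer, which either merges the page into an existing flow or pushes
 * it up the stack; the payload is never copied on this path.
 */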

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames, so drop oversized frames here.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}

	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;

err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
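
/* With QLGE_SB_PAD of 32 (the skb_reserve() noted above) and the usual
 * NET_IP_ALIGN of 2, the data pointer moves back 30 bytes, leaving the
 * IP header aligned on a 2-byte boundary while
 * skb_copy_to_linear_data() slides the received bytes down to the new
 * offset.
 */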

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer.
			 * We copy it to a new skb and let it go. This can
			 * happen with jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG,
					     qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
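
/* ql_build_rx_skb() covers four inbound layouts: header split into a
 * small buffer with no data, data (alone or with the header) in a
 * single small buffer, data in one large buffer, and data spanning a
 * chain of large buffers described by an sglist delivered in a small
 * buffer.
 */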

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}
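
/* Dispatch order above matters: header/data split (RSP_HV) is tested
 * first, then small-buffer completions (RSP_DS), then the two large
 * buffer (RSP_DL) variants, with the checksummed-TCP case routed to
 * GRO before the generic page path.
 */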

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear the adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the ASIC recovery bit to tell the reset process
	 * that we are in a fatal-error recovery rather than a
	 * normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
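
/* Waking at 25% free (wq_len / 4) rather than on the first freed slot
 * keeps the tx queue from bouncing between stopped and woken on every
 * completion while the ring is nearly full.
 */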

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
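
/* Standard NAPI contract: napi_complete() and the interrupt re-arm may
 * only happen when less than the full budget was consumed; otherwise
 * the core keeps polling this ring.
 */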

static void qlge_vlan_mode(struct net_device *ndev, u32 features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static u32 qlge_fix_features(struct net_device *ndev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev, u32 features)
{
	u32 changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
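
/* The ~csum_tcpudp_magic()/~csum_ipv6_magic() calls above seed the TCP
 * checksum field with the pseudo-header sum (computed with length 0)
 * so the hardware only has to fold in the payload as it segments; this
 * is the usual contract for LSO-capable NICs.
 */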

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
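
/* The stop/wake pair: qlge_send() stops the subqueue when fewer than
 * two IOCB slots remain, and ql_clean_outbound_rx_ring() wakes it once
 * completions have freed at least a quarter of the ring.
 */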

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);

	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;

	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc),
			GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
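
/* The wq_base_dma & WQ_ADDR_ALIGN test above rejects a work-queue base
 * the chip could not address. pci_alloc_consistent() returns
 * page-aligned memory in practice, so this is a defensive check
 * against a misaligned DMA handle.
 */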

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
  2589. static void ql_free_rx_resources(struct ql_adapter *qdev,
  2590. struct rx_ring *rx_ring)
  2591. {
  2592. /* Free the small buffer queue. */
  2593. if (rx_ring->sbq_base) {
  2594. pci_free_consistent(qdev->pdev,
  2595. rx_ring->sbq_size,
  2596. rx_ring->sbq_base, rx_ring->sbq_base_dma);
  2597. rx_ring->sbq_base = NULL;
  2598. }
  2599. /* Free the small buffer queue control blocks. */
  2600. kfree(rx_ring->sbq);
  2601. rx_ring->sbq = NULL;
  2602. /* Free the large buffer queue. */
  2603. if (rx_ring->lbq_base) {
  2604. pci_free_consistent(qdev->pdev,
  2605. rx_ring->lbq_size,
  2606. rx_ring->lbq_base, rx_ring->lbq_base_dma);
  2607. rx_ring->lbq_base = NULL;
  2608. }
  2609. /* Free the large buffer queue control blocks. */
  2610. kfree(rx_ring->lbq);
  2611. rx_ring->lbq = NULL;
  2612. /* Free the rx queue. */
  2613. if (rx_ring->cq_base) {
  2614. pci_free_consistent(qdev->pdev,
  2615. rx_ring->cq_size,
  2616. rx_ring->cq_base, rx_ring->cq_base_dma);
  2617. rx_ring->cq_base = NULL;
  2618. }
  2619. }
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
		pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				     &rx_ring->cq_base_dma);
	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					     &rx_ring->sbq_base_dma);
		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
				       GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					     &rx_ring->lbq_base_dma);
		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
				       GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

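/* Unmap and free any skbs still outstanding on the tx rings when
 * the interface is torn down; in normal operation they are reclaimed
 * as tx completions arrive.
 */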
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	/* A length of 0 in the 16-bit field encodes the maximum of 65536. */
	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

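/* Build the work queue init control block (wqicb) for a tx ring and
 * download it to the chip. The wqicb is assumed to be the first
 * member of struct tx_ring, which is what makes the cast below work.
 */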
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded WQICB.\n");
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors. We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
					      qdev->msi_x_entry,
					      qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}

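/* Hook a handler to each interrupt vector. With MSI-X every vector
 * gets its own handler and rx_ring; with MSI or legacy interrupts a
 * single, possibly shared, handler services everything via rx_ring[0].
 */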
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			} else {
				netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
					     "Hooked intr %d, queue type %s, with name %s.\n",
					     i,
					     qdev->rx_ring[i].type == DEFAULT_Q ?
					     "DEFAULT_Q" :
					     qdev->rx_ring[i].type == TX_Q ?
					     "TX_Q" :
					     qdev->rx_ring[i].type == RX_Q ?
					     "RX_Q" : "",
					     intr_context->name);
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED, &qdev->flags)
						? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

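/* Download the RSS init control block (ricb): the hash keys plus a
 * 1024-entry indirection table that spreads inbound flows across the
 * RSS rings.
 */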
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded RICB.\n");
	return status;
}

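/* Clear all 16 entries in the frame-to-queue routing table while
 * holding the routing index semaphore.
 */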
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}
	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Enabling NAPI for rx_ring[%d].\n", i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox command and
	 * just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */
	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

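/* Take the adapter down: quiesce NAPI and interrupts, reclaim
 * in-flight tx skbs, soft-reset the chip, then release rx buffers.
 */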
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

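/* Bring the adapter up: program the chip, post rx buffers, restore
 * carrier, rx mode and VLAN state, then enable interrupts.
 */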
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit pci_channel_io_perm_failure
	 * failure condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

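/* Size the rings around the online CPU count: ideally one RSS ring
 * and one tx ring per CPU, with the RSS ring count trimmed to the
 * number of MSI-X vectors actually granted.
 */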
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "lbq_buf_size %d, order = %d\n",
				     rx_ring->lbq_buf_size,
				     qdev->lbq_buf_order);
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

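/* Only transitions between the standard (1500) and jumbo (9000) MTUs
 * are supported. If the interface is running it is bounced so the
 * large rx buffers can be resized to match the new MTU.
 */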
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

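/* Undo everything ql_init_device() set up: the work queue, register
 * and doorbell mappings, the coredump buffer and the PCI regions.
 */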
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

  4067. static int __devinit ql_init_device(struct pci_dev *pdev,
  4068. struct net_device *ndev, int cards_found)
  4069. {
  4070. struct ql_adapter *qdev = netdev_priv(ndev);
  4071. int err = 0;
  4072. memset((void *)qdev, 0, sizeof(*qdev));
  4073. err = pci_enable_device(pdev);
  4074. if (err) {
  4075. dev_err(&pdev->dev, "PCI device enable failed.\n");
  4076. return err;
  4077. }
  4078. qdev->ndev = ndev;
  4079. qdev->pdev = pdev;
  4080. pci_set_drvdata(pdev, ndev);
  4081. /* Set PCIe read request size */
  4082. err = pcie_set_readrq(pdev, 4096);
  4083. if (err) {
  4084. dev_err(&pdev->dev, "Set readrq failed.\n");
  4085. goto err_out1;
  4086. }
  4087. err = pci_request_regions(pdev, DRV_NAME);
  4088. if (err) {
  4089. dev_err(&pdev->dev, "PCI region request failed.\n");
  4090. return err;
  4091. }
  4092. pci_set_master(pdev);
  4093. if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
  4094. set_bit(QL_DMA64, &qdev->flags);
  4095. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  4096. } else {
  4097. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  4098. if (!err)
  4099. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  4100. }
  4101. if (err) {
  4102. dev_err(&pdev->dev, "No usable DMA configuration.\n");
  4103. goto err_out2;
  4104. }

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}

	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}

	/* Make sure the EEPROM is good. */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep a local copy of the current MAC address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/* Set up the operating parameters. */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
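
/*
 * Entry points handed to the net core; installed into ndev->netdev_ops
 * in qlge_probe() below.
 */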
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
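
/*
 * Watchdog timer.  The periodic MMIO read of the status register gives
 * EEH a chance to notice a dead PCI bus; once the channel is reported
 * offline the timer is deliberately not rearmed.
 */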
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
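
/*
 * PCI probe entry point: allocate the multiqueue net_device, initialize
 * the adapter, advertise offload features, register with the net core,
 * and start the EEH watchdog timer.
 */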
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		/* The netdev was never registered, so it must be freed here. */
		free_netdev(ndev);
		return err;
	}

	/*
	 * Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);

	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
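
/*
 * Non-static wrappers that expose transmit and inbound-ring cleanup to
 * the loopback self-test code elsewhere in the driver.
 */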
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
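
/* PCI remove entry point: tear down in the reverse order of qlge_probe(). */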
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disable the timer. */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
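
/*
 * Final stage of EEH recovery: reopen the interface if it was running
 * before the error, restart the watchdog timer, and reattach the device.
 */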
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
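
/* EEH callbacks, wired into the PCI core through qlge_driver below. */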
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
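
/*
 * Legacy pci_driver power management: quiesce the adapter, set up
 * wake-on-LAN via ql_wol(), and drop to the requested sleep state.
 * Also reused by qlge_shutdown() below.
 */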
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		/* Bail out only if bringing the adapter down failed. */
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */
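
/* Reboot/poweroff hook: put the adapter to sleep just as suspend does. */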
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
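
/* Module entry points: simply register/unregister the PCI driver. */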
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);