/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"
#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
/*
 * These are the known PHYs which are used
 */
typedef enum {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et phyDevice;
	u32 phyIdOUI;
	u16 phyIdModel;
	char *name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
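/*
 * Editor's note on the semaphore convention used above: the upper 16 bits
 * of the value written to semaphoreReg act as a mask selecting which
 * semaphore field to update, and the low bits carry the requested owner
 * code.  Reading the register back and comparing against sem_bits is how
 * the driver learns whether the hardware actually granted the semaphore
 * (another function on the chip may already own it).
 */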
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}
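/*
 * Editor's note: each writel() in the accessors above is followed by a
 * readl() of the same register.  This is the usual PCI posted-write flush
 * idiom: the read forces the write out of any intermediate buffers before
 * the caller proceeds, which matters for ordering-sensitive sequences such
 * as the NVRAM bit-banging below.  ql_write_nvram_reg() additionally adds
 * a 1 us delay to respect the serial EEPROM's timing.
 */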
/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
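/*
 * Editor's note: the interrupt mask register appears to follow the same
 * masked-write convention as the semaphore register, with bits 31:16
 * selecting which mask bits to change and bits 15:0 supplying the new
 * values.  Writing (ISP_IMR_ENABLE_INT << 16) therefore clears the enable
 * bit, while ((0xff << 16) | ISP_IMR_ENABLE_INT) rewrites the whole low
 * byte with only the enable bit set.
 */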
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
				qdev->lrg_buf_skb_check++;
				return;
			}
			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}
static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
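/*
 * Editor's note: the large receive buffers are kept on a simple
 * singly-linked FIFO.  ql_release_to_lrg_buf_free_list() appends at
 * lrg_buf_free_tail and, if the previous skb was handed up the stack,
 * allocates and DMA-maps a replacement; ql_get_from_lrg_buf_free_list()
 * pops from the head.  lrg_buf_skb_check counts buffers that are on the
 * list without a usable skb.
 */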
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1 :
					 AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
						AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy. Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev,
					      &port_regs->CommonRegs.serialPortInterfaceReg) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}
/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
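/*
 * Editor's note: a complete fm93c56a read transaction, as composed above,
 * is: raise chip select (fm93c56a_select), clock out the start bit, the
 * READ opcode and the word address (fm93c56a_cmd), clock in the data bits
 * (fm93c56a_datain), then drop chip select (fm93c56a_deselect).  The whole
 * exchange is bit-banged through serialPortInterfaceReg with explicit
 * clock rise/fall writes.
 */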
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
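/*
 * Editor's note: the NVRAM image is validated by summing all EEPROM_SIZE
 * 16-bit words; a correctly programmed image is expected to sum to zero
 * (mod 2^16), so any nonzero result indicates corrupt or unprogrammed
 * NVRAM and the routine fails.  On success it returns the (zero) checksum.
 */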
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices.
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}
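/*
 * Editor's note: scan mode makes the MAC hardware poll MII_SCAN_REGISTER
 * on the PHY continuously (and, with MAC_MII_CONTROL_AS, across both
 * ports).  It has to be turned off around any manual MII access, which is
 * why each accessor below brackets its register traffic with
 * ql_mii_disable_scan_mode()/ql_mii_enable_scan_mode().
 */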
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
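/*
 * Editor's note: all four MII accessors above follow the same sequence:
 * disable autoscan, wait for MAC_MII_STATUS_BSY to clear, program
 * macMIIMgmtAddrReg with (PHY address | register), then either write
 * macMIIMgmtDataReg or pulse MAC_MII_CONTROL_RC to start a read, wait for
 * BSY to clear again, and restore scan mode.  The *_ex variants take an
 * explicit PHY address instead of using qdev->PHYAddr.
 */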
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	printk(KERN_INFO "%s: enabling Agere specific PHY\n",
	       qdev->ndev->name);
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;
			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);
			break;
		}
	}

	return result;
}
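/*
 * Editor's note: the OUI/model reconstruction above follows the standard
 * clause-22 PHY identifier layout: PHY_ID_0_REG carries the middle bits
 * of the OUI, while PHY_ID_1_REG packs the remaining OUI bits in its top
 * six bits, the model number in bits 9:4, and the silicon revision in the
 * low four bits (ignored here).
 */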
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;
		reg = (reg >> 8) & 3;
		break;
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}
static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;
		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	case PHY_VITESSE_VSC8211:
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
		       qdev->ndev->name);
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
		       qdev->ndev->name);
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
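/*
 * Editor's note: the five MAC-configuration helpers above all use the
 * masked-write form: "bit | (bit << 16)" sets a feature bit while
 * "(bit << 16)" alone clears it, and the write is steered to mac0ConfigReg
 * or mac1ConfigReg based on qdev->mac_index.
 */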
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}
/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}
static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}
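/*
 * Editor's note: the link-down indication in ispControlStatus appears to be
 * a latched event bit.  The detect routine only reads it; the clear routine
 * rewrites it with the masked-write form (bit | bit << 16), which seems to
 * act as the acknowledge for this function's port.
 */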
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}
  1200. static void ql_phy_reset_ex(struct ql3_adapter *qdev)
  1201. {
  1202. ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
  1203. PHYAddr[qdev->mac_index]);
  1204. }
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration =
		    qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
		    qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and need to be
	   reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;
		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;
		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");
	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is up.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg,
				   0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
			    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
			    | SUPPORTED_10baseT_Full
			    | SUPPORTED_100baseT_Half
			    | SUPPORTED_100baseT_Full
			    | SUPPORTED_1000baseT_Half
			    | SUPPORTED_1000baseT_Full
			    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING
		       "Frame short, but frame was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR
		       "Frame too short to be legal, frame not sent.\n");
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n",
		       mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
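/*
 * A rough sketch of the 3022 two-buffer scheme described above (added
 * for illustration; QL_HEADER_SPACE is the reserved headroom mentioned
 * in the comment, and the sizes are not to scale):
 *
 *   buffer 1 (returned to HW):    [ ethhdr / early header info ]
 *   buffer 2 (sent up the stack):
 *       [ QL_HEADER_SPACE headroom ][ rest of headers + data ]
 *        ^-- skb_push() opens this headroom so the header info from
 *            buffer 1 can be copied in before netif_receive_skb().
 *
 * ql_process_macip_rx_intr() below performs exactly this copy with
 * skb_copy_from_linear_data_offset() into skb_push(skb2, size).
 */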
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				"TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not handled!\n"
				       "	dropping the packet, opcode = %x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}

static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__napi_complete(napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}

static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var = ql_read_page0_reg_l(qdev,
						  &port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
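/*
 * Worked example of the mapping below (illustration only): with 7
 * fragments there are 8 data segments (skb->data + 7 frags).  The
 * IOCB holds 2 data ALPs plus 1 continuation ALP, the first OAL
 * holds 4 data ALPs plus 1 continuation ALP, and a second OAL holds
 * the last 2 data ALPs: 2 + 1 + 4 + 1 + 2 = 10, which is what
 * ql_get_seg_count() returns for frags == 7.
 */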
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}

static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}

/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs = 0;
		     completed_segs < frag_cnt;
		     completed_segs++, seg++) {
			skb_frag_t *frag =
			    &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping outbound address list failed with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
					cpu_to_le32(sizeof(struct oal) |
						    OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map = pci_map_page(qdev->pdev, frag->page,
					   frag->page_offset, frag->size,
					   PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;

map_error:
	/*
	 * A PCI mapping failed, so back out: traverse the OALs and the
	 * associated pages that have already been mapped and unmap them
	 * to clean up properly.
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
		    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
		    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_cb->map[seg],
							mapaddr),
					 pci_unmap_len(&tx_cb->map[seg],
						       maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
			       pci_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}

/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always at the top of the chain followed by one or more
 * OALs (when necessary).
 */
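/*
 * Chain layout implied by the comment above (illustration only; the
 * sizes come from the comment, not from the hardware manual):
 *
 *   IOCB:  [ALP0][ALP1][ALP2 -> OAL#1]
 *   OAL#1: [ALP0][ALP1][ALP2][ALP3][ALP4 -> OAL#2]
 *   OAL#2: [ALP0][ALP1][ALP2][ALP3][ALP4 -> ...]
 *
 * ql_send_map() above builds this chain; its "seg == 2/7/12/17"
 * tests mark the ALP slots that must become continuation pointers.
 */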
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2))
		return NETDEV_TX_BUSY;

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX "%s: Could not map the segments!\n",
		       __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}

static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}

static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc(qdev->num_large_buffers *
				sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
	if (qdev->lrg_buf == NULL) {
		printk(KERN_ERR PFX
		       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX "%s: lBufQ failed\n", qdev->ndev->name);
		/* free the control block array allocated above */
		kfree(qdev->lrg_buf);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		kfree(qdev->lrg_buf);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	kfree(qdev->lrg_buf);	/* kfree(NULL) is a no-op */
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}

static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and carve it up for
	   all of the small buffers. */
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}

static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}

static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}

static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}

static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed, "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}

static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		if (tx_cb->oal) {
			kfree(tx_cb->oal);
			tx_cb->oal = NULL;
		}
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}

static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->num_large_buffers =
	    qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
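	/*
	 * Worked example (illustration only; assumes the usual values
	 * VLAN_ETH_HLEN = 18, VLAN_ID_LEN = 2, ETHERNET_CRC_SIZE = 4):
	 * for an MTU of 1500, lrg_buffer_len becomes
	 * 1500 + 18 + 2 + QL_HEADER_SPACE.  QL_HEADER_SPACE cancels out
	 * of max_frame_size, which therefore works out to
	 * 1500 + 18 + 2 + 4 = 1524 bytes.
	 */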
	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}

static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}

static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

  2688. static int ql_adapter_initialize(struct ql3_adapter *qdev)
  2689. {
  2690. u32 value;
  2691. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2692. struct ql3xxx_host_memory_registers __iomem *hmem_regs =
  2693. (void __iomem *)port_regs;
  2694. u32 delay = 10;
  2695. int status = 0;
  2696. if(ql_mii_setup(qdev))
  2697. return -1;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((__le16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
		(struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);
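
	/*
	 * At this point the request/response rings and both receive buffer
	 * queues are programmed: the host pushes producer indices to the
	 * chip through the registers above, while the chip DMAs its own
	 * index updates back into the shadow words in host memory that
	 * were zeroed earlier in this function.
	 */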

	/*
	 * Find out if the chip has already been initialized.  If it has,
	 * then we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
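
	/* Poll up to ~5 seconds (10 x 500 ms) for the init-complete bit. */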
	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}
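
#if 0
/*
 * Illustrative only, not compiled: the register writes above all follow
 * the same masked-write shape.  A hypothetical helper (our name, not
 * part of this driver) would look like this, assuming the upper 16 bits
 * really are a write-enable mask:
 */
static void ql_masked_page0_write(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u16 bits)
{
	/* enable exactly the bits we are setting */
	ql_write_page0_reg(qdev, reg, ((u32)bits << 16) | bits);
}
#endif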

/*
 * Caller holds hw_lock.
 */
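/*
 * Reset strategy: request a soft reset (ISP_CONTROL_SR) and poll for the
 * chip to clear the bit, clear the network-reset-interrupt bit if it is
 * still set afterwards, and fall back to a force soft reset
 * (ISP_CONTROL_FSR) with a second polling loop if the first request
 * never completes.  Each loop allows up to five seconds.
 */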
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	printk(KERN_DEBUG PFX
	       "%s: Waiting up to 5 seconds for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value = ql_read_common_reg(qdev,
					   &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;
		ssleep(1);
	} while (--max_wait_time);

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done.
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while (--max_wait_time);
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
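
#if 0
/*
 * Illustrative only, not compiled: both polling loops above wait for a
 * self-clearing control bit.  A hypothetical helper (our name, not part
 * of this driver) capturing that pattern:
 */
static int ql_wait_ctrl_bit_clear(struct ql3_adapter *qdev, u16 bit,
				  int seconds)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u16 value;

	do {
		value = ql_read_common_reg(qdev,
					   &port_regs->CommonRegs.ispControlStatus);
		if ((value & bit) == 0)
			return 0;	/* chip finished; bit self-cleared */
		ssleep(1);
	} while (--seconds);

	return -ETIMEDOUT;	/* bit never cleared */
}
#endif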

static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %pM\n",
		       ndev->name, ndev->dev_addr);
}

static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
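
/*
 * Bring-up order: allocate DMA/buffer resources, optionally switch to
 * MSI, claim the interrupt, then take the inter-driver lock before
 * touching the hardware in ql_adapter_initialize().  Only after the
 * chip is initialized are the watchdog timer, NAPI, and interrupts
 * enabled.
 */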
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize.  Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d; already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		/* Return a real error; the lock's 1/0 result is not one. */
		err = -EBUSY;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}

static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
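
/*
 * Recovery path for a hung transmit queue: ql3xxx_tx_timeout() above
 * only stops the queue and schedules tx_timeout_work; the actual
 * down/up cycle happens in process context in ql_tx_timeout_work()
 * (defined after ql_reset_work() below), via
 * ql_cycle_adapter(qdev, QL_DO_RESET).
 */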

static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0], mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					       pci_unmap_addr(&tx_cb->map[j], mapaddr),
					       pci_unmap_len(&tx_cb->map[j], maplen),
					       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the soft reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
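
/*
 * Note: the adapter timer is armed once in ql_adapter_up(); presumably
 * the link state work re-arms it each pass, since nothing in this
 * function does.
 */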

static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_set_multicast_list	= NULL, /* not allowed on NIC side */
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};

static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value.  We do this to
	 * handle jumbo frames.  Offset 0x4e is presumably this device's
	 * PCI-X Command register (hard-coded here rather than located
	 * via pci_find_capability()).
	 */
	if (qdev->pci_x) {
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
	}

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);