qla3xxx.c
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k3"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
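/*
 * Note on the semaphore register layout, as inferred from the helpers
 * above: the high 16 bits of sem_mask are the write-enable bits that
 * select one semaphore field, and sem_bits is the ownership code this
 * function wants to place in that field.  After writing
 * (sem_mask | sem_bits), a read-back tells us whether we won: the
 * hardware only latches our code if no other function already holds
 * the semaphore, so (readback & (sem_mask >> 16)) == sem_bits means
 * we own it.  ql_sem_lock() tries once; ql_sem_spinlock() retries
 * for up to three seconds.
 */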
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
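/*
 * The mask register writes above follow the write-enable convention
 * that appears throughout this driver (compare the MAC config
 * helpers further down): the upper 16 bits of the written value
 * select which of the lower 16 bits the hardware will actually
 * change.  So (ISP_IMR_ENABLE_INT << 16) enables writing the bit but
 * supplies a zero, clearing it, while ((0xff << 16) |
 * ISP_IMR_ENABLE_INT) sets it.
 */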
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from
			 * first buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
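/*
 * The large receive buffers live on a simple singly linked free list
 * with head/tail pointers.  ql_release_to_lrg_buf_free_list()
 * appends at the tail and, if the control block has lost its skb,
 * allocates and DMA-maps a replacement (counting failures in
 * lrg_buf_skb_check so they can be retried later);
 * ql_get_from_lrg_buf_free_list() pops from the head.
 */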
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg(qdev,
					&port_regs->CommonRegs.
					serialPortInterfaceReg) &
		     AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
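/*
 * A complete FM93C56A (Microwire-style serial EEPROM) read, as
 * performed by eeprom_readword(): raise chip select, bit-bang the
 * READ opcode and then the word address on DO with an explicit clock
 * rise/fall per bit, shift the 16 data bits back in from DI, and
 * drop chip select.  Every ql_write_nvram_reg() keeps ISP_NVRAM_MASK
 * set so the write-enable bits only expose the NVRAM interface bits
 * of the shared serial port register.
 */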
static void ql_swap_mac_addr(u8 *macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and the two 8-bit values, version and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *)&qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices.
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
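/*
 * MII management access pattern used by the four helpers above: scan
 * mode must be disabled first (rewriting the address register as part
 * of that), or the controller keeps cycling through PHY registers on
 * its own.  A read is then started by clearing and re-asserting
 * MAC_MII_CONTROL_RC, after which the busy bit in macMIIStatusReg is
 * polled (up to roughly 10 ms in ql_wait_for_mii_ready()) before the
 * data register is sampled.  The _ex variants address the PHY by
 * mac_index; the plain variants use the adapter's own qdev->PHYAddr.
 */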
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    mac_index);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    mac_index);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_petbi_reset_ex(qdev, mac_index);
	ql_petbi_start_neg_ex(qdev, mac_index);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	reg = (((reg & 0x18) >> 3) & 3);

	if (reg == 2)
		return SPEED_1000;
	else if (reg == 1)
		return SPEED_100;
	else if (reg == 0)
		return SPEED_10;
	else
		return -1;
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	return (reg & PHY_AUX_DUPLEX_STAT) != 0;
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
					 u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
			    PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
	ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
			    mac_index);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_phy_reset_ex(qdev, mac_index);
	ql_phy_start_neg_ex(qdev, mac_index);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev, qdev->mac_index);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");
	} else {	/* Remote error detected */
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}
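/*
 * The link state machine above runs under hw_lock and tracks just two
 * states.  From LS_DOWN (or any unknown state, which restarts the
 * port if this function is link master) it waits for
 * auto-negotiation to complete and lets ql_finish_auto_neg() program
 * the MAC and move the port to LS_UP.  From LS_UP it watches both
 * the live port status and the latched link-down indication in
 * ispControlStatus, falling back to LS_DOWN when either reports a
 * drop.
 */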
  1241. /*
  1242. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1243. */
  1244. static void ql_get_phy_owner(struct ql3_adapter *qdev)
  1245. {
  1246. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1247. set_bit(QL_LINK_MASTER,&qdev->flags);
  1248. else
  1249. clear_bit(QL_LINK_MASTER,&qdev->flags);
  1250. }
  1251. /*
  1252. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1253. */
  1254. static void ql_init_scan_mode(struct ql3_adapter *qdev)
  1255. {
  1256. ql_mii_enable_scan_mode(qdev);
  1257. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1258. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1259. ql_petbi_init_ex(qdev, qdev->mac_index);
  1260. } else {
  1261. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1262. ql_phy_init_ex(qdev, qdev->mac_index);
  1263. }
  1264. }
  1265. /*
  1266. * MII_Setup needs to be called before taking the PHY out of reset so that the
  1267. * management interface clock speed can be set properly. It would be better if
  1268. * we had a way to disable MDC until after the PHY is out of reset, but we
  1269. * don't have that capability.
  1270. */
  1271. static int ql_mii_setup(struct ql3_adapter *qdev)
  1272. {
  1273. u32 reg;
  1274. struct ql3xxx_port_registers __iomem *port_regs =
  1275. qdev->mem_map_registers;
  1276. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1277. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1278. 2) << 7))
  1279. return -1;
  1280. if (qdev->device_id == QL3032_DEVICE_ID)
  1281. ql_write_page0_reg(qdev,
  1282. &port_regs->macMIIMgmtControlReg, 0x0f00000);
  1283. /* Divide 125MHz clock by 28 to meet PHY timing requirements */
  1284. reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
  1285. ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
  1286. reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
  1287. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1288. return 0;
  1289. }
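/*
 * A note on a register write idiom that recurs below (a sketch of the
 * convention as inferred from the calls in this file, not new driver
 * code): many of the common/port control registers carry a write-enable
 * mask in their upper 16 bits, so only bits whose mask bit is also set
 * actually change.  Hence patterns such as:
 *
 *	ql_write_common_reg(qdev, reg, (bit << 16) | bit);	set 'bit'
 *	ql_write_common_reg(qdev, reg, (bit << 16));		clear 'bit'
 *
 * as in the serialPortInterfaceReg, portControl and ipAddrIndexReg
 * writes in ql_adapter_initialize().
 */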
  1290. static u32 ql_supported_modes(struct ql3_adapter *qdev)
  1291. {
  1292. u32 supported;
  1293. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1294. supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
  1295. | SUPPORTED_Autoneg;
  1296. } else {
  1297. supported = SUPPORTED_10baseT_Half
  1298. | SUPPORTED_10baseT_Full
  1299. | SUPPORTED_100baseT_Half
  1300. | SUPPORTED_100baseT_Full
  1301. | SUPPORTED_1000baseT_Half
  1302. | SUPPORTED_1000baseT_Full
  1303. | SUPPORTED_Autoneg | SUPPORTED_TP;
  1304. }
  1305. return supported;
  1306. }
  1307. static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
  1308. {
  1309. int status;
  1310. unsigned long hw_flags;
  1311. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1312. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1313. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1314. 2) << 7)) {
  1315. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1316. return 0;
  1317. }
  1318. status = ql_is_auto_cfg(qdev);
  1319. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1320. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1321. return status;
  1322. }
  1323. static u32 ql_get_speed(struct ql3_adapter *qdev)
  1324. {
  1325. u32 status;
  1326. unsigned long hw_flags;
  1327. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1328. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1329. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1330. 2) << 7)) {
  1331. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1332. return 0;
  1333. }
  1334. status = ql_get_link_speed(qdev);
  1335. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1336. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1337. return status;
  1338. }
  1339. static int ql_get_full_dup(struct ql3_adapter *qdev)
  1340. {
  1341. int status;
  1342. unsigned long hw_flags;
  1343. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1344. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1345. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1346. 2) << 7)) {
  1347. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1348. return 0;
  1349. }
  1350. status = ql_is_link_full_dup(qdev);
  1351. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1352. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1353. return status;
  1354. }
  1355. static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
  1356. {
  1357. struct ql3_adapter *qdev = netdev_priv(ndev);
  1358. ecmd->transceiver = XCVR_INTERNAL;
  1359. ecmd->supported = ql_supported_modes(qdev);
  1360. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1361. ecmd->port = PORT_FIBRE;
  1362. } else {
  1363. ecmd->port = PORT_TP;
  1364. ecmd->phy_address = qdev->PHYAddr;
  1365. }
  1366. ecmd->advertising = ql_supported_modes(qdev);
  1367. ecmd->autoneg = ql_get_auto_cfg_status(qdev);
  1368. ecmd->speed = ql_get_speed(qdev);
  1369. ecmd->duplex = ql_get_full_dup(qdev);
  1370. return 0;
  1371. }
  1372. static void ql_get_drvinfo(struct net_device *ndev,
  1373. struct ethtool_drvinfo *drvinfo)
  1374. {
  1375. struct ql3_adapter *qdev = netdev_priv(ndev);
  1376. strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
  1377. strncpy(drvinfo->version, ql3xxx_driver_version, 32);
  1378. strncpy(drvinfo->fw_version, "N/A", 32);
  1379. strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
  1380. drvinfo->n_stats = 0;
  1381. drvinfo->testinfo_len = 0;
  1382. drvinfo->regdump_len = 0;
  1383. drvinfo->eedump_len = 0;
  1384. }
  1385. static u32 ql_get_msglevel(struct net_device *ndev)
  1386. {
  1387. struct ql3_adapter *qdev = netdev_priv(ndev);
  1388. return qdev->msg_enable;
  1389. }
  1390. static void ql_set_msglevel(struct net_device *ndev, u32 value)
  1391. {
  1392. struct ql3_adapter *qdev = netdev_priv(ndev);
  1393. qdev->msg_enable = value;
  1394. }
  1395. static const struct ethtool_ops ql3xxx_ethtool_ops = {
  1396. .get_settings = ql_get_settings,
  1397. .get_drvinfo = ql_get_drvinfo,
  1398. .get_perm_addr = ethtool_op_get_perm_addr,
  1399. .get_link = ethtool_op_get_link,
  1400. .get_msglevel = ql_get_msglevel,
  1401. .set_msglevel = ql_set_msglevel,
  1402. };
  1403. static int ql_populate_free_queue(struct ql3_adapter *qdev)
  1404. {
  1405. struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
  1406. dma_addr_t map;
  1407. int err;
  1408. while (lrg_buf_cb) {
  1409. if (!lrg_buf_cb->skb) {
  1410. lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
  1411. qdev->lrg_buffer_len);
  1412. if (unlikely(!lrg_buf_cb->skb)) {
  1413. printk(KERN_DEBUG PFX
  1414. "%s: Failed netdev_alloc_skb().\n",
  1415. qdev->ndev->name);
  1416. break;
  1417. } else {
  1418. /*
1419. * We save some space to copy the ethhdr from the
1420. * first buffer.
  1421. */
  1422. skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
  1423. map = pci_map_single(qdev->pdev,
  1424. lrg_buf_cb->skb->data,
  1425. qdev->lrg_buffer_len -
  1426. QL_HEADER_SPACE,
  1427. PCI_DMA_FROMDEVICE);
  1428. err = pci_dma_mapping_error(map);
  1429. if(err) {
  1430. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  1431. qdev->ndev->name, err);
  1432. dev_kfree_skb(lrg_buf_cb->skb);
  1433. lrg_buf_cb->skb = NULL;
  1434. break;
  1435. }
  1436. lrg_buf_cb->buf_phy_addr_low =
  1437. cpu_to_le32(LS_64BITS(map));
  1438. lrg_buf_cb->buf_phy_addr_high =
  1439. cpu_to_le32(MS_64BITS(map));
  1440. pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
  1441. pci_unmap_len_set(lrg_buf_cb, maplen,
  1442. qdev->lrg_buffer_len -
  1443. QL_HEADER_SPACE);
  1444. --qdev->lrg_buf_skb_check;
  1445. if (!qdev->lrg_buf_skb_check)
  1446. return 1;
  1447. }
  1448. }
  1449. lrg_buf_cb = lrg_buf_cb->next;
  1450. }
  1451. return 0;
  1452. }
  1453. /*
  1454. * Caller holds hw_lock.
  1455. */
  1456. static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
  1457. {
  1458. struct bufq_addr_element *lrg_buf_q_ele;
  1459. int i;
  1460. struct ql_rcv_buf_cb *lrg_buf_cb;
  1461. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1462. if ((qdev->lrg_buf_free_count >= 8)
  1463. && (qdev->lrg_buf_release_cnt >= 16)) {
  1464. if (qdev->lrg_buf_skb_check)
  1465. if (!ql_populate_free_queue(qdev))
  1466. return;
  1467. lrg_buf_q_ele = qdev->lrg_buf_next_free;
  1468. while ((qdev->lrg_buf_release_cnt >= 16)
  1469. && (qdev->lrg_buf_free_count >= 8)) {
  1470. for (i = 0; i < 8; i++) {
  1471. lrg_buf_cb =
  1472. ql_get_from_lrg_buf_free_list(qdev);
  1473. lrg_buf_q_ele->addr_high =
  1474. lrg_buf_cb->buf_phy_addr_high;
  1475. lrg_buf_q_ele->addr_low =
  1476. lrg_buf_cb->buf_phy_addr_low;
  1477. lrg_buf_q_ele++;
  1478. qdev->lrg_buf_release_cnt--;
  1479. }
  1480. qdev->lrg_buf_q_producer_index++;
  1481. if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
  1482. qdev->lrg_buf_q_producer_index = 0;
  1483. if (qdev->lrg_buf_q_producer_index ==
  1484. (qdev->num_lbufq_entries - 1)) {
  1485. lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
  1486. }
  1487. }
  1488. qdev->lrg_buf_next_free = lrg_buf_q_ele;
  1489. ql_write_common_reg(qdev,
  1490. &port_regs->CommonRegs.
  1491. rxLargeQProducerIndex,
  1492. qdev->lrg_buf_q_producer_index);
  1493. }
  1494. }
  1495. static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
  1496. struct ob_mac_iocb_rsp *mac_rsp)
  1497. {
  1498. struct ql_tx_buf_cb *tx_cb;
  1499. int i;
  1500. int retval = 0;
  1501. if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1502. printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
  1503. }
  1504. tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
  1505. /* Check the transmit response flags for any errors */
  1506. if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
  1507. printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
  1508. qdev->stats.tx_errors++;
  1509. retval = -EIO;
  1510. goto frame_not_sent;
  1511. }
  1512. if(tx_cb->seg_count == 0) {
  1513. printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
  1514. qdev->stats.tx_errors++;
  1515. retval = -EIO;
  1516. goto invalid_seg_count;
  1517. }
  1518. pci_unmap_single(qdev->pdev,
  1519. pci_unmap_addr(&tx_cb->map[0], mapaddr),
  1520. pci_unmap_len(&tx_cb->map[0], maplen),
  1521. PCI_DMA_TODEVICE);
  1522. tx_cb->seg_count--;
  1523. if (tx_cb->seg_count) {
  1524. for (i = 1; i < tx_cb->seg_count; i++) {
  1525. pci_unmap_page(qdev->pdev,
  1526. pci_unmap_addr(&tx_cb->map[i],
  1527. mapaddr),
  1528. pci_unmap_len(&tx_cb->map[i], maplen),
  1529. PCI_DMA_TODEVICE);
  1530. }
  1531. }
  1532. qdev->stats.tx_packets++;
  1533. qdev->stats.tx_bytes += tx_cb->skb->len;
  1534. frame_not_sent:
  1535. dev_kfree_skb_irq(tx_cb->skb);
  1536. tx_cb->skb = NULL;
  1537. invalid_seg_count:
  1538. atomic_inc(&qdev->tx_count);
  1539. }
1540. static void ql_get_sbuf(struct ql3_adapter *qdev)
  1541. {
  1542. if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
  1543. qdev->small_buf_index = 0;
  1544. qdev->small_buf_release_cnt++;
  1545. }
1546. static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
  1547. {
  1548. struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
  1549. lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
  1550. qdev->lrg_buf_release_cnt++;
  1551. if (++qdev->lrg_buf_index == qdev->num_large_buffers)
  1552. qdev->lrg_buf_index = 0;
  1553. return(lrg_buf_cb);
  1554. }
  1555. /*
  1556. * The difference between 3022 and 3032 for inbound completions:
  1557. * 3022 uses two buffers per completion. The first buffer contains
  1558. * (some) header info, the second the remainder of the headers plus
  1559. * the data. For this chip we reserve some space at the top of the
  1560. * receive buffer so that the header info in buffer one can be
1561. * prepended to buffer two. Buffer two is then sent up while
  1562. * buffer one is returned to the hardware to be reused.
1563. * 3032 receives all of its data and headers in one buffer for a
  1564. * simpler process. 3032 also supports checksum verification as
  1565. * can be seen in ql_process_macip_rx_intr().
  1566. */
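/*
 * Illustrative sketch of the 3022 two-buffer case described above (an
 * assumed layout, derived from the skb_push()/memcpy() in
 * ql_process_macip_rx_intr() below):
 *
 *	buffer 1:  [ VLAN_ID_LEN bytes | ethernet (+VLAN) header | ... ]
 *	buffer 2:  [ QL_HEADER_SPACE headroom | IP headers + data ... ]
 *
 * The header from buffer 1 is pushed into buffer 2's reserved headroom,
 * buffer 2 is handed to the stack, and both control blocks are returned
 * to the large buffer free list.
 */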
  1567. static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
  1568. struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
  1569. {
  1570. struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
  1571. struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
  1572. struct sk_buff *skb;
  1573. u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
  1574. /*
  1575. * Get the inbound address list (small buffer).
  1576. */
  1577. ql_get_sbuf(qdev);
  1578. if (qdev->device_id == QL3022_DEVICE_ID)
  1579. lrg_buf_cb1 = ql_get_lbuf(qdev);
  1580. /* start of second buffer */
  1581. lrg_buf_cb2 = ql_get_lbuf(qdev);
  1582. skb = lrg_buf_cb2->skb;
  1583. qdev->stats.rx_packets++;
  1584. qdev->stats.rx_bytes += length;
  1585. skb_put(skb, length);
  1586. pci_unmap_single(qdev->pdev,
  1587. pci_unmap_addr(lrg_buf_cb2, mapaddr),
  1588. pci_unmap_len(lrg_buf_cb2, maplen),
  1589. PCI_DMA_FROMDEVICE);
  1590. prefetch(skb->data);
  1591. skb->dev = qdev->ndev;
  1592. skb->ip_summed = CHECKSUM_NONE;
  1593. skb->protocol = eth_type_trans(skb, qdev->ndev);
  1594. netif_receive_skb(skb);
  1595. qdev->ndev->last_rx = jiffies;
  1596. lrg_buf_cb2->skb = NULL;
  1597. if (qdev->device_id == QL3022_DEVICE_ID)
  1598. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
  1599. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
  1600. }
  1601. static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
  1602. struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
  1603. {
  1604. struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
  1605. struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
  1606. struct sk_buff *skb1 = NULL, *skb2;
  1607. struct net_device *ndev = qdev->ndev;
  1608. u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
  1609. u16 size = 0;
  1610. /*
  1611. * Get the inbound address list (small buffer).
  1612. */
  1613. ql_get_sbuf(qdev);
  1614. if (qdev->device_id == QL3022_DEVICE_ID) {
  1615. /* start of first buffer on 3022 */
  1616. lrg_buf_cb1 = ql_get_lbuf(qdev);
  1617. skb1 = lrg_buf_cb1->skb;
  1618. size = ETH_HLEN;
  1619. if (*((u16 *) skb1->data) != 0xFFFF)
  1620. size += VLAN_ETH_HLEN - ETH_HLEN;
  1621. }
  1622. /* start of second buffer */
  1623. lrg_buf_cb2 = ql_get_lbuf(qdev);
  1624. skb2 = lrg_buf_cb2->skb;
  1625. skb_put(skb2, length); /* Just the second buffer length here. */
  1626. pci_unmap_single(qdev->pdev,
  1627. pci_unmap_addr(lrg_buf_cb2, mapaddr),
  1628. pci_unmap_len(lrg_buf_cb2, maplen),
  1629. PCI_DMA_FROMDEVICE);
  1630. prefetch(skb2->data);
  1631. skb2->ip_summed = CHECKSUM_NONE;
  1632. if (qdev->device_id == QL3022_DEVICE_ID) {
  1633. /*
1634. * Copy the ethhdr from the first buffer to the second. This
  1635. * is necessary for 3022 IP completions.
  1636. */
  1637. memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
  1638. } else {
  1639. u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
  1640. if (checksum &
  1641. (IB_IP_IOCB_RSP_3032_ICE |
  1642. IB_IP_IOCB_RSP_3032_CE |
  1643. IB_IP_IOCB_RSP_3032_NUC)) {
  1644. printk(KERN_ERR
  1645. "%s: Bad checksum for this %s packet, checksum = %x.\n",
  1646. __func__,
  1647. ((checksum &
  1648. IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
  1649. "UDP"),checksum);
  1650. } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
  1651. skb2->ip_summed = CHECKSUM_UNNECESSARY;
  1652. }
  1653. }
  1654. skb2->dev = qdev->ndev;
  1655. skb2->protocol = eth_type_trans(skb2, qdev->ndev);
  1656. netif_receive_skb(skb2);
  1657. qdev->stats.rx_packets++;
  1658. qdev->stats.rx_bytes += length;
  1659. ndev->last_rx = jiffies;
  1660. lrg_buf_cb2->skb = NULL;
  1661. if (qdev->device_id == QL3022_DEVICE_ID)
  1662. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
  1663. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
  1664. }
  1665. static int ql_tx_rx_clean(struct ql3_adapter *qdev,
  1666. int *tx_cleaned, int *rx_cleaned, int work_to_do)
  1667. {
  1668. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1669. struct net_rsp_iocb *net_rsp;
  1670. struct net_device *ndev = qdev->ndev;
  1671. unsigned long hw_flags;
  1672. int work_done = 0;
  1673. u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
  1674. /* While there are entries in the completion queue. */
  1675. while ((rsp_producer_index !=
  1676. qdev->rsp_consumer_index) && (work_done < work_to_do)) {
  1677. net_rsp = qdev->rsp_current;
  1678. switch (net_rsp->opcode) {
  1679. case OPCODE_OB_MAC_IOCB_FN0:
  1680. case OPCODE_OB_MAC_IOCB_FN2:
  1681. ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
  1682. net_rsp);
  1683. (*tx_cleaned)++;
  1684. break;
  1685. case OPCODE_IB_MAC_IOCB:
  1686. case OPCODE_IB_3032_MAC_IOCB:
  1687. ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
  1688. net_rsp);
  1689. (*rx_cleaned)++;
  1690. break;
  1691. case OPCODE_IB_IP_IOCB:
  1692. case OPCODE_IB_3032_IP_IOCB:
  1693. ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
  1694. net_rsp);
  1695. (*rx_cleaned)++;
  1696. break;
  1697. default:
  1698. {
  1699. u32 *tmp = (u32 *) net_rsp;
  1700. printk(KERN_ERR PFX
  1701. "%s: Hit default case, not "
  1702. "handled!\n"
  1703. " dropping the packet, opcode = "
  1704. "%x.\n",
  1705. ndev->name, net_rsp->opcode);
  1706. printk(KERN_ERR PFX
  1707. "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
  1708. (unsigned long int)tmp[0],
  1709. (unsigned long int)tmp[1],
  1710. (unsigned long int)tmp[2],
  1711. (unsigned long int)tmp[3]);
  1712. }
  1713. }
  1714. qdev->rsp_consumer_index++;
  1715. if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
  1716. qdev->rsp_consumer_index = 0;
  1717. qdev->rsp_current = qdev->rsp_q_virt_addr;
  1718. } else {
  1719. qdev->rsp_current++;
  1720. }
  1721. work_done = *tx_cleaned + *rx_cleaned;
  1722. }
  1723. if(work_done) {
  1724. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1725. ql_update_lrg_bufq_prod_index(qdev);
  1726. if (qdev->small_buf_release_cnt >= 16) {
  1727. while (qdev->small_buf_release_cnt >= 16) {
  1728. qdev->small_buf_q_producer_index++;
  1729. if (qdev->small_buf_q_producer_index ==
  1730. NUM_SBUFQ_ENTRIES)
  1731. qdev->small_buf_q_producer_index = 0;
  1732. qdev->small_buf_release_cnt -= 8;
  1733. }
  1734. wmb();
  1735. ql_write_common_reg(qdev,
  1736. &port_regs->CommonRegs.
  1737. rxSmallQProducerIndex,
  1738. qdev->small_buf_q_producer_index);
  1739. }
  1740. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1741. }
  1742. return *tx_cleaned + *rx_cleaned;
  1743. }
  1744. static int ql_poll(struct net_device *ndev, int *budget)
  1745. {
  1746. struct ql3_adapter *qdev = netdev_priv(ndev);
  1747. int work_to_do = min(*budget, ndev->quota);
  1748. int rx_cleaned = 0, tx_cleaned = 0;
  1749. unsigned long hw_flags;
  1750. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1751. if (!netif_carrier_ok(ndev))
  1752. goto quit_polling;
  1753. ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
  1754. *budget -= rx_cleaned;
  1755. ndev->quota -= rx_cleaned;
  1756. if( tx_cleaned + rx_cleaned != work_to_do ||
  1757. !netif_running(ndev)) {
  1758. quit_polling:
  1759. netif_rx_complete(ndev);
  1760. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1761. ql_write_common_reg(qdev,
  1762. &port_regs->CommonRegs.rspQConsumerIndex,
  1763. qdev->rsp_consumer_index);
  1764. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1765. ql_enable_interrupts(qdev);
  1766. return 0;
  1767. }
  1768. return 1;
  1769. }
  1770. static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
  1771. {
  1772. struct net_device *ndev = dev_id;
  1773. struct ql3_adapter *qdev = netdev_priv(ndev);
  1774. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1775. u32 value;
  1776. int handled = 1;
  1777. u32 var;
  1779. value =
  1780. ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
  1781. if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
  1782. spin_lock(&qdev->adapter_lock);
  1783. netif_stop_queue(qdev->ndev);
  1784. netif_carrier_off(qdev->ndev);
  1785. ql_disable_interrupts(qdev);
  1786. qdev->port_link_state = LS_DOWN;
1787. set_bit(QL_RESET_ACTIVE,&qdev->flags);
  1788. if (value & ISP_CONTROL_FE) {
  1789. /*
  1790. * Chip Fatal Error.
  1791. */
  1792. var =
  1793. ql_read_page0_reg_l(qdev,
  1794. &port_regs->PortFatalErrStatus);
  1795. printk(KERN_WARNING PFX
  1796. "%s: Resetting chip. PortFatalErrStatus "
  1797. "register = 0x%x\n", ndev->name, var);
1798. set_bit(QL_RESET_START,&qdev->flags);
  1799. } else {
  1800. /*
  1801. * Soft Reset Requested.
  1802. */
1803. set_bit(QL_RESET_PER_SCSI,&qdev->flags);
  1804. printk(KERN_ERR PFX
  1805. "%s: Another function issued a reset to the "
  1806. "chip. ISR value = %x.\n", ndev->name, value);
  1807. }
  1808. queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
  1809. spin_unlock(&qdev->adapter_lock);
  1810. } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
  1811. ql_disable_interrupts(qdev);
  1812. if (likely(netif_rx_schedule_prep(ndev))) {
  1813. __netif_rx_schedule(ndev);
  1814. }
  1815. } else {
  1816. return IRQ_NONE;
  1817. }
  1818. return IRQ_RETVAL(handled);
  1819. }
  1820. /*
  1821. * Get the total number of segments needed for the
  1822. * given number of fragments. This is necessary because
  1823. * outbound address lists (OAL) will be used when more than
  1824. * two frags are given. Each address list has 5 addr/len
1825. * pairs. The 5th pair in each OAL is used to point to
1826. * the next OAL if more frags are coming.
  1827. * That is why the frags:segment count ratio is not linear.
  1828. */
  1829. static int ql_get_seg_count(struct ql3_adapter *qdev,
  1830. unsigned short frags)
  1831. {
  1832. if (qdev->device_id == QL3022_DEVICE_ID)
  1833. return 1;
  1834. switch(frags) {
  1835. case 0: return 1; /* just the skb->data seg */
  1836. case 1: return 2; /* skb->data + 1 frag */
  1837. case 2: return 3; /* skb->data + 2 frags */
1838. case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */
  1839. case 4: return 6;
  1840. case 5: return 7;
  1841. case 6: return 8;
  1842. case 7: return 10;
  1843. case 8: return 11;
  1844. case 9: return 12;
  1845. case 10: return 13;
  1846. case 11: return 15;
  1847. case 12: return 16;
  1848. case 13: return 17;
  1849. case 14: return 18;
  1850. case 15: return 20;
  1851. case 16: return 21;
  1852. case 17: return 22;
  1853. case 18: return 23;
  1854. }
  1855. return -1;
  1856. }
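/*
 * The table above collapses to a closed form: one segment for
 * skb->data, one per fragment, plus one continuation entry per chained
 * OAL.  A sketch for reference only (the driver keeps the explicit
 * table):
 *
 *	static int seg_count_closed_form(unsigned short frags)
 *	{
 *		int oal_hops = (frags >= 3) ? ((frags - 3) / 4 + 1) : 0;
 *		return 1 + frags + oal_hops;
 *	}
 *
 * e.g. frags = 7 gives 1 + 7 + 2 = 10, matching the table entry.
 */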
  1857. static void ql_hw_csum_setup(struct sk_buff *skb,
  1858. struct ob_mac_iocb_req *mac_iocb_ptr)
  1859. {
  1860. struct ethhdr *eth;
  1861. struct iphdr *ip = NULL;
  1862. u8 offset = ETH_HLEN;
  1863. eth = (struct ethhdr *)(skb->data);
  1864. if (eth->h_proto == __constant_htons(ETH_P_IP)) {
  1865. ip = (struct iphdr *)&skb->data[ETH_HLEN];
  1866. } else if (eth->h_proto == htons(ETH_P_8021Q) &&
  1867. ((struct vlan_ethhdr *)skb->data)->
  1868. h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
  1869. ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
  1870. offset = VLAN_ETH_HLEN;
  1871. }
  1872. if (ip) {
  1873. if (ip->protocol == IPPROTO_TCP) {
  1874. mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
  1875. OB_3032MAC_IOCB_REQ_IC;
  1876. mac_iocb_ptr->ip_hdr_off = offset;
  1877. mac_iocb_ptr->ip_hdr_len = ip->ihl;
  1878. } else if (ip->protocol == IPPROTO_UDP) {
  1879. mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
  1880. OB_3032MAC_IOCB_REQ_IC;
  1881. mac_iocb_ptr->ip_hdr_off = offset;
  1882. mac_iocb_ptr->ip_hdr_len = ip->ihl;
  1883. }
  1884. }
  1885. }
  1886. /*
1887. * Map the buffers for this transmit. Returns NETDEV_TX_OK on
1888. * success or NETDEV_TX_BUSY on a mapping failure.
  1889. */
  1890. static int ql_send_map(struct ql3_adapter *qdev,
  1891. struct ob_mac_iocb_req *mac_iocb_ptr,
  1892. struct ql_tx_buf_cb *tx_cb,
  1893. struct sk_buff *skb)
  1894. {
  1895. struct oal *oal;
  1896. struct oal_entry *oal_entry;
  1897. int len = skb_headlen(skb);
  1898. dma_addr_t map;
  1899. int err;
  1900. int completed_segs, i;
  1901. int seg_cnt, seg = 0;
  1902. int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
  1903. seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
  1904. (skb_shinfo(skb)->nr_frags));
  1905. if(seg_cnt == -1) {
  1906. printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
  1907. return NETDEV_TX_BUSY;
  1908. }
  1909. /*
  1910. * Map the skb buffer first.
  1911. */
  1912. map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
  1913. err = pci_dma_mapping_error(map);
  1914. if(err) {
  1915. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  1916. qdev->ndev->name, err);
  1917. return NETDEV_TX_BUSY;
  1918. }
  1919. oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
  1920. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  1921. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  1922. oal_entry->len = cpu_to_le32(len);
  1923. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
  1924. pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
  1925. seg++;
  1926. if (seg_cnt == 1) {
  1927. /* Terminate the last segment. */
  1928. oal_entry->len =
  1929. cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
  1930. } else {
  1931. oal = tx_cb->oal;
  1932. for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
  1933. skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
  1934. oal_entry++;
  1935. if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
  1936. (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
  1937. (seg == 12 && seg_cnt > 13) || /* but necessary. */
  1938. (seg == 17 && seg_cnt > 18)) {
  1939. /* Continuation entry points to outbound address list. */
  1940. map = pci_map_single(qdev->pdev, oal,
  1941. sizeof(struct oal),
  1942. PCI_DMA_TODEVICE);
  1943. err = pci_dma_mapping_error(map);
  1944. if(err) {
  1945. printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
  1946. qdev->ndev->name, err);
  1947. goto map_error;
  1948. }
  1949. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  1950. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  1951. oal_entry->len =
  1952. cpu_to_le32(sizeof(struct oal) |
  1953. OAL_CONT_ENTRY);
  1954. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
  1955. map);
  1956. pci_unmap_len_set(&tx_cb->map[seg], maplen,
1957. sizeof(struct oal));
  1958. oal_entry = (struct oal_entry *)oal;
  1959. oal++;
  1960. seg++;
  1961. }
  1962. map =
  1963. pci_map_page(qdev->pdev, frag->page,
  1964. frag->page_offset, frag->size,
  1965. PCI_DMA_TODEVICE);
  1966. err = pci_dma_mapping_error(map);
  1967. if(err) {
  1968. printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
  1969. qdev->ndev->name, err);
  1970. goto map_error;
  1971. }
  1972. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  1973. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  1974. oal_entry->len = cpu_to_le32(frag->size);
  1975. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
  1976. pci_unmap_len_set(&tx_cb->map[seg], maplen,
  1977. frag->size);
  1978. }
  1979. /* Terminate the last segment. */
  1980. oal_entry->len =
  1981. cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
  1982. }
  1983. return NETDEV_TX_OK;
  1984. map_error:
1985. /* A PCI mapping failed, so we need to back out: walk the OALs
1986. * and associated pages that have already been mapped and unmap
1987. * them to clean up properly.
  1988. */
  1989. seg = 1;
  1990. oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
  1991. oal = tx_cb->oal;
  1992. for (i=0; i<completed_segs; i++,seg++) {
  1993. oal_entry++;
  1994. if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
  1995. (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
  1996. (seg == 12 && seg_cnt > 13) || /* but necessary. */
  1997. (seg == 17 && seg_cnt > 18)) {
  1998. pci_unmap_single(qdev->pdev,
  1999. pci_unmap_addr(&tx_cb->map[seg], mapaddr),
  2000. pci_unmap_len(&tx_cb->map[seg], maplen),
  2001. PCI_DMA_TODEVICE);
  2002. oal++;
  2003. seg++;
  2004. }
  2005. pci_unmap_page(qdev->pdev,
  2006. pci_unmap_addr(&tx_cb->map[seg], mapaddr),
  2007. pci_unmap_len(&tx_cb->map[seg], maplen),
  2008. PCI_DMA_TODEVICE);
  2009. }
  2010. pci_unmap_single(qdev->pdev,
  2011. pci_unmap_addr(&tx_cb->map[0], mapaddr),
2012. pci_unmap_len(&tx_cb->map[0], maplen),
  2013. PCI_DMA_TODEVICE);
  2014. return NETDEV_TX_BUSY;
  2015. }
  2016. /*
  2017. * The difference between 3022 and 3032 sends:
  2018. * 3022 only supports a simple single segment transmission.
  2019. * 3032 supports checksumming and scatter/gather lists (fragments).
  2020. * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
  2021. * in the IOCB plus a chain of outbound address lists (OAL) that
  2022. * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2023. * will be used to point to an OAL when more ALP entries are required.
  2024. * The IOCB is always the top of the chain followed by one or more
  2025. * OALs (when necessary).
  2026. */
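/*
 * Worked example of the chaining described above (a sketch traced
 * through ql_send_map(), not new driver code): an skb with 7 fragments
 * needs 10 segments.  The IOCB's first two ALPs carry skb->data and
 * frag 0 and its third ALP is a continuation entry pointing at the
 * first OAL; that OAL carries frags 1-4 in its first four ALPs and its
 * fifth continues to a second OAL, which carries frags 5 and 6.
 */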
  2027. static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
  2028. {
  2029. struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
  2030. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2031. struct ql_tx_buf_cb *tx_cb;
  2032. u32 tot_len = skb->len;
  2033. struct ob_mac_iocb_req *mac_iocb_ptr;
  2034. if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
  2035. return NETDEV_TX_BUSY;
  2036. }
2037. tx_cb = &qdev->tx_buf[qdev->req_producer_index];
  2038. if((tx_cb->seg_count = ql_get_seg_count(qdev,
  2039. (skb_shinfo(skb)->nr_frags))) == -1) {
  2040. printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
  2041. return NETDEV_TX_OK;
  2042. }
  2043. mac_iocb_ptr = tx_cb->queue_entry;
  2044. memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
  2045. mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
  2046. mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
  2047. mac_iocb_ptr->flags |= qdev->mb_bit_mask;
  2048. mac_iocb_ptr->transaction_id = qdev->req_producer_index;
  2049. mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
  2050. tx_cb->skb = skb;
  2051. if (qdev->device_id == QL3032_DEVICE_ID &&
  2052. skb->ip_summed == CHECKSUM_PARTIAL)
  2053. ql_hw_csum_setup(skb, mac_iocb_ptr);
  2054. if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
  2055. printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
  2056. return NETDEV_TX_BUSY;
  2057. }
  2058. wmb();
  2059. qdev->req_producer_index++;
  2060. if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
  2061. qdev->req_producer_index = 0;
  2062. wmb();
  2063. ql_write_common_reg_l(qdev,
  2064. &port_regs->CommonRegs.reqQProducerIndex,
  2065. qdev->req_producer_index);
  2066. ndev->trans_start = jiffies;
  2067. if (netif_msg_tx_queued(qdev))
  2068. printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
  2069. ndev->name, qdev->req_producer_index, skb->len);
  2070. atomic_dec(&qdev->tx_count);
  2071. return NETDEV_TX_OK;
  2072. }
  2073. static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
  2074. {
  2075. qdev->req_q_size =
  2076. (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
  2077. qdev->req_q_virt_addr =
  2078. pci_alloc_consistent(qdev->pdev,
  2079. (size_t) qdev->req_q_size,
  2080. &qdev->req_q_phy_addr);
  2081. if ((qdev->req_q_virt_addr == NULL) ||
  2082. LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2083. printk(KERN_ERR PFX "%s: reqQ allocation failed.\n",
  2084. qdev->ndev->name);
  2085. return -ENOMEM;
  2086. }
  2087. qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
  2088. qdev->rsp_q_virt_addr =
  2089. pci_alloc_consistent(qdev->pdev,
  2090. (size_t) qdev->rsp_q_size,
  2091. &qdev->rsp_q_phy_addr);
  2092. if ((qdev->rsp_q_virt_addr == NULL) ||
  2093. LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
  2094. printk(KERN_ERR PFX
  2095. "%s: rspQ allocation failed\n",
  2096. qdev->ndev->name);
  2097. pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
  2098. qdev->req_q_virt_addr,
  2099. qdev->req_q_phy_addr);
  2100. return -ENOMEM;
  2101. }
  2102. set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
  2103. return 0;
  2104. }
  2105. static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
  2106. {
  2107. if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
  2108. printk(KERN_INFO PFX
  2109. "%s: Already done.\n", qdev->ndev->name);
  2110. return;
  2111. }
  2112. pci_free_consistent(qdev->pdev,
  2113. qdev->req_q_size,
  2114. qdev->req_q_virt_addr, qdev->req_q_phy_addr);
  2115. qdev->req_q_virt_addr = NULL;
  2116. pci_free_consistent(qdev->pdev,
  2117. qdev->rsp_q_size,
  2118. qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
  2119. qdev->rsp_q_virt_addr = NULL;
  2120. clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
  2121. }
  2122. static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
  2123. {
  2124. /* Create Large Buffer Queue */
  2125. qdev->lrg_buf_q_size =
  2126. qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
  2127. if (qdev->lrg_buf_q_size < PAGE_SIZE)
  2128. qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
  2129. else
  2130. qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
  2131. qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
  2132. if (qdev->lrg_buf == NULL) {
  2133. printk(KERN_ERR PFX
  2134. "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
  2135. return -ENOMEM;
  2136. }
  2137. qdev->lrg_buf_q_alloc_virt_addr =
  2138. pci_alloc_consistent(qdev->pdev,
  2139. qdev->lrg_buf_q_alloc_size,
  2140. &qdev->lrg_buf_q_alloc_phy_addr);
  2141. if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
  2142. printk(KERN_ERR PFX
  2143. "%s: lBufQ failed\n", qdev->ndev->name);
  2144. return -ENOMEM;
  2145. }
  2146. qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
  2147. qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
  2148. /* Create Small Buffer Queue */
  2149. qdev->small_buf_q_size =
  2150. NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
  2151. if (qdev->small_buf_q_size < PAGE_SIZE)
  2152. qdev->small_buf_q_alloc_size = PAGE_SIZE;
  2153. else
  2154. qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
  2155. qdev->small_buf_q_alloc_virt_addr =
  2156. pci_alloc_consistent(qdev->pdev,
  2157. qdev->small_buf_q_alloc_size,
  2158. &qdev->small_buf_q_alloc_phy_addr);
  2159. if (qdev->small_buf_q_alloc_virt_addr == NULL) {
  2160. printk(KERN_ERR PFX
  2161. "%s: Small Buffer Queue allocation failed.\n",
  2162. qdev->ndev->name);
  2163. pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
  2164. qdev->lrg_buf_q_alloc_virt_addr,
  2165. qdev->lrg_buf_q_alloc_phy_addr);
  2166. return -ENOMEM;
  2167. }
  2168. qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
  2169. qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
  2170. set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
  2171. return 0;
  2172. }
  2173. static void ql_free_buffer_queues(struct ql3_adapter *qdev)
  2174. {
  2175. if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
  2176. printk(KERN_INFO PFX
  2177. "%s: Already done.\n", qdev->ndev->name);
  2178. return;
  2179. }
2180. kfree(qdev->lrg_buf);
  2181. pci_free_consistent(qdev->pdev,
  2182. qdev->lrg_buf_q_alloc_size,
  2183. qdev->lrg_buf_q_alloc_virt_addr,
  2184. qdev->lrg_buf_q_alloc_phy_addr);
  2185. qdev->lrg_buf_q_virt_addr = NULL;
  2186. pci_free_consistent(qdev->pdev,
  2187. qdev->small_buf_q_alloc_size,
  2188. qdev->small_buf_q_alloc_virt_addr,
  2189. qdev->small_buf_q_alloc_phy_addr);
  2190. qdev->small_buf_q_virt_addr = NULL;
  2191. clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
  2192. }
  2193. static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
  2194. {
  2195. int i;
  2196. struct bufq_addr_element *small_buf_q_entry;
2197. /* Currently we allocate one chunk of memory and slice it up for all the small buffers. */
  2198. qdev->small_buf_total_size =
  2199. (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
  2200. QL_SMALL_BUFFER_SIZE);
  2201. qdev->small_buf_virt_addr =
  2202. pci_alloc_consistent(qdev->pdev,
  2203. qdev->small_buf_total_size,
  2204. &qdev->small_buf_phy_addr);
  2205. if (qdev->small_buf_virt_addr == NULL) {
  2206. printk(KERN_ERR PFX
  2207. "%s: Failed to get small buffer memory.\n",
  2208. qdev->ndev->name);
  2209. return -ENOMEM;
  2210. }
  2211. qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
  2212. qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
  2213. small_buf_q_entry = qdev->small_buf_q_virt_addr;
  2214. /* Initialize the small buffer queue. */
  2215. for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
  2216. small_buf_q_entry->addr_high =
  2217. cpu_to_le32(qdev->small_buf_phy_addr_high);
  2218. small_buf_q_entry->addr_low =
  2219. cpu_to_le32(qdev->small_buf_phy_addr_low +
  2220. (i * QL_SMALL_BUFFER_SIZE));
  2221. small_buf_q_entry++;
  2222. }
  2223. qdev->small_buf_index = 0;
  2224. set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
  2225. return 0;
  2226. }
  2227. static void ql_free_small_buffers(struct ql3_adapter *qdev)
  2228. {
  2229. if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
  2230. printk(KERN_INFO PFX
  2231. "%s: Already done.\n", qdev->ndev->name);
  2232. return;
  2233. }
  2234. if (qdev->small_buf_virt_addr != NULL) {
  2235. pci_free_consistent(qdev->pdev,
  2236. qdev->small_buf_total_size,
  2237. qdev->small_buf_virt_addr,
  2238. qdev->small_buf_phy_addr);
  2239. qdev->small_buf_virt_addr = NULL;
  2240. }
  2241. }
  2242. static void ql_free_large_buffers(struct ql3_adapter *qdev)
  2243. {
  2244. int i = 0;
  2245. struct ql_rcv_buf_cb *lrg_buf_cb;
  2246. for (i = 0; i < qdev->num_large_buffers; i++) {
  2247. lrg_buf_cb = &qdev->lrg_buf[i];
  2248. if (lrg_buf_cb->skb) {
  2249. dev_kfree_skb(lrg_buf_cb->skb);
  2250. pci_unmap_single(qdev->pdev,
  2251. pci_unmap_addr(lrg_buf_cb, mapaddr),
  2252. pci_unmap_len(lrg_buf_cb, maplen),
  2253. PCI_DMA_FROMDEVICE);
  2254. memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
  2255. } else {
  2256. break;
  2257. }
  2258. }
  2259. }
  2260. static void ql_init_large_buffers(struct ql3_adapter *qdev)
  2261. {
  2262. int i;
  2263. struct ql_rcv_buf_cb *lrg_buf_cb;
  2264. struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
  2265. for (i = 0; i < qdev->num_large_buffers; i++) {
  2266. lrg_buf_cb = &qdev->lrg_buf[i];
  2267. buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
  2268. buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
  2269. buf_addr_ele++;
  2270. }
  2271. qdev->lrg_buf_index = 0;
  2272. qdev->lrg_buf_skb_check = 0;
  2273. }
  2274. static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
  2275. {
  2276. int i;
  2277. struct ql_rcv_buf_cb *lrg_buf_cb;
  2278. struct sk_buff *skb;
  2279. dma_addr_t map;
  2280. int err;
  2281. for (i = 0; i < qdev->num_large_buffers; i++) {
  2282. skb = netdev_alloc_skb(qdev->ndev,
  2283. qdev->lrg_buffer_len);
  2284. if (unlikely(!skb)) {
  2285. /* Better luck next round */
  2286. printk(KERN_ERR PFX
  2287. "%s: large buff alloc failed, "
  2288. "for %d bytes at index %d.\n",
  2289. qdev->ndev->name,
  2290. qdev->lrg_buffer_len * 2, i);
  2291. ql_free_large_buffers(qdev);
  2292. return -ENOMEM;
  2293. } else {
  2294. lrg_buf_cb = &qdev->lrg_buf[i];
  2295. memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
  2296. lrg_buf_cb->index = i;
  2297. lrg_buf_cb->skb = skb;
  2298. /*
2299. * We save some space to copy the ethhdr from the first
2300. * buffer.
  2301. */
  2302. skb_reserve(skb, QL_HEADER_SPACE);
  2303. map = pci_map_single(qdev->pdev,
  2304. skb->data,
  2305. qdev->lrg_buffer_len -
  2306. QL_HEADER_SPACE,
  2307. PCI_DMA_FROMDEVICE);
  2308. err = pci_dma_mapping_error(map);
  2309. if(err) {
  2310. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  2311. qdev->ndev->name, err);
  2312. ql_free_large_buffers(qdev);
  2313. return -ENOMEM;
  2314. }
  2315. pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
  2316. pci_unmap_len_set(lrg_buf_cb, maplen,
  2317. qdev->lrg_buffer_len -
  2318. QL_HEADER_SPACE);
  2319. lrg_buf_cb->buf_phy_addr_low =
  2320. cpu_to_le32(LS_64BITS(map));
  2321. lrg_buf_cb->buf_phy_addr_high =
  2322. cpu_to_le32(MS_64BITS(map));
  2323. }
  2324. }
  2325. return 0;
  2326. }
  2327. static void ql_free_send_free_list(struct ql3_adapter *qdev)
  2328. {
  2329. struct ql_tx_buf_cb *tx_cb;
  2330. int i;
  2331. tx_cb = &qdev->tx_buf[0];
  2332. for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
  2333. if (tx_cb->oal) {
  2334. kfree(tx_cb->oal);
  2335. tx_cb->oal = NULL;
  2336. }
  2337. tx_cb++;
  2338. }
  2339. }
  2340. static int ql_create_send_free_list(struct ql3_adapter *qdev)
  2341. {
  2342. struct ql_tx_buf_cb *tx_cb;
  2343. int i;
  2344. struct ob_mac_iocb_req *req_q_curr =
  2345. qdev->req_q_virt_addr;
  2346. /* Create free list of transmit buffers */
  2347. for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
  2348. tx_cb = &qdev->tx_buf[i];
  2349. tx_cb->skb = NULL;
  2350. tx_cb->queue_entry = req_q_curr;
  2351. req_q_curr++;
  2352. tx_cb->oal = kmalloc(512, GFP_KERNEL);
  2353. if (tx_cb->oal == NULL)
  2354. return -1;
  2355. }
  2356. return 0;
  2357. }
  2358. static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
  2359. {
  2360. if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
  2361. qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
  2362. qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
  2363. }
  2364. else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
  2365. /*
2366. * Bigger buffers, so fewer of them.
  2367. */
  2368. qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
  2369. qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
  2370. } else {
  2371. printk(KERN_ERR PFX
  2372. "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
  2373. qdev->ndev->name);
  2374. return -ENOMEM;
  2375. }
  2376. qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
  2377. qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
  2378. qdev->max_frame_size =
  2379. (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
  2380. /*
  2381. * First allocate a page of shared memory and use it for shadow
  2382. * locations of Network Request Queue Consumer Address Register and
  2383. * Network Completion Queue Producer Index Register
  2384. */
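/*
 * Shadow page layout implied by the pointer arithmetic below (offsets
 * from the start of the page; the remainder of the page is unused):
 *
 *	+0:  request queue consumer index, read by the host
 *	+8:  response queue producer index, written by the chip
 */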
  2385. qdev->shadow_reg_virt_addr =
  2386. pci_alloc_consistent(qdev->pdev,
  2387. PAGE_SIZE, &qdev->shadow_reg_phy_addr);
  2388. if (qdev->shadow_reg_virt_addr != NULL) {
  2389. qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
  2390. qdev->req_consumer_index_phy_addr_high =
  2391. MS_64BITS(qdev->shadow_reg_phy_addr);
  2392. qdev->req_consumer_index_phy_addr_low =
  2393. LS_64BITS(qdev->shadow_reg_phy_addr);
  2394. qdev->prsp_producer_index =
  2395. (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
  2396. qdev->rsp_producer_index_phy_addr_high =
  2397. qdev->req_consumer_index_phy_addr_high;
  2398. qdev->rsp_producer_index_phy_addr_low =
  2399. qdev->req_consumer_index_phy_addr_low + 8;
  2400. } else {
  2401. printk(KERN_ERR PFX
  2402. "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
  2403. return -ENOMEM;
  2404. }
  2405. if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
  2406. printk(KERN_ERR PFX
  2407. "%s: ql_alloc_net_req_rsp_queues failed.\n",
  2408. qdev->ndev->name);
  2409. goto err_req_rsp;
  2410. }
  2411. if (ql_alloc_buffer_queues(qdev) != 0) {
  2412. printk(KERN_ERR PFX
  2413. "%s: ql_alloc_buffer_queues failed.\n",
  2414. qdev->ndev->name);
  2415. goto err_buffer_queues;
  2416. }
  2417. if (ql_alloc_small_buffers(qdev) != 0) {
  2418. printk(KERN_ERR PFX
  2419. "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
  2420. goto err_small_buffers;
  2421. }
  2422. if (ql_alloc_large_buffers(qdev) != 0) {
  2423. printk(KERN_ERR PFX
  2424. "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
  2425. goto err_small_buffers;
  2426. }
  2427. /* Initialize the large buffer queue. */
  2428. ql_init_large_buffers(qdev);
  2429. if (ql_create_send_free_list(qdev))
  2430. goto err_free_list;
  2431. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2432. return 0;
  2433. err_free_list:
  2434. ql_free_send_free_list(qdev);
  2435. err_small_buffers:
  2436. ql_free_buffer_queues(qdev);
  2437. err_buffer_queues:
  2438. ql_free_net_req_rsp_queues(qdev);
  2439. err_req_rsp:
  2440. pci_free_consistent(qdev->pdev,
  2441. PAGE_SIZE,
  2442. qdev->shadow_reg_virt_addr,
  2443. qdev->shadow_reg_phy_addr);
  2444. return -ENOMEM;
  2445. }
  2446. static void ql_free_mem_resources(struct ql3_adapter *qdev)
  2447. {
  2448. ql_free_send_free_list(qdev);
  2449. ql_free_large_buffers(qdev);
  2450. ql_free_small_buffers(qdev);
  2451. ql_free_buffer_queues(qdev);
  2452. ql_free_net_req_rsp_queues(qdev);
  2453. if (qdev->shadow_reg_virt_addr != NULL) {
  2454. pci_free_consistent(qdev->pdev,
  2455. PAGE_SIZE,
  2456. qdev->shadow_reg_virt_addr,
  2457. qdev->shadow_reg_phy_addr);
  2458. qdev->shadow_reg_virt_addr = NULL;
  2459. }
  2460. }
  2461. static int ql_init_misc_registers(struct ql3_adapter *qdev)
  2462. {
  2463. struct ql3xxx_local_ram_registers __iomem *local_ram =
  2464. (void __iomem *)qdev->mem_map_registers;
  2465. if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
  2466. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  2467. 2) << 4))
  2468. return -1;
  2469. ql_write_page2_reg(qdev,
  2470. &local_ram->bufletSize, qdev->nvram_data.bufletSize);
  2471. ql_write_page2_reg(qdev,
  2472. &local_ram->maxBufletCount,
  2473. qdev->nvram_data.bufletCount);
  2474. ql_write_page2_reg(qdev,
  2475. &local_ram->freeBufletThresholdLow,
  2476. (qdev->nvram_data.tcpWindowThreshold25 << 16) |
  2477. (qdev->nvram_data.tcpWindowThreshold0));
  2478. ql_write_page2_reg(qdev,
  2479. &local_ram->freeBufletThresholdHigh,
  2480. qdev->nvram_data.tcpWindowThreshold50);
  2481. ql_write_page2_reg(qdev,
  2482. &local_ram->ipHashTableBase,
  2483. (qdev->nvram_data.ipHashTableBaseHi << 16) |
  2484. qdev->nvram_data.ipHashTableBaseLo);
  2485. ql_write_page2_reg(qdev,
  2486. &local_ram->ipHashTableCount,
  2487. qdev->nvram_data.ipHashTableSize);
  2488. ql_write_page2_reg(qdev,
  2489. &local_ram->tcpHashTableBase,
  2490. (qdev->nvram_data.tcpHashTableBaseHi << 16) |
  2491. qdev->nvram_data.tcpHashTableBaseLo);
  2492. ql_write_page2_reg(qdev,
  2493. &local_ram->tcpHashTableCount,
  2494. qdev->nvram_data.tcpHashTableSize);
  2495. ql_write_page2_reg(qdev,
  2496. &local_ram->ncbBase,
  2497. (qdev->nvram_data.ncbTableBaseHi << 16) |
  2498. qdev->nvram_data.ncbTableBaseLo);
  2499. ql_write_page2_reg(qdev,
  2500. &local_ram->maxNcbCount,
  2501. qdev->nvram_data.ncbTableSize);
  2502. ql_write_page2_reg(qdev,
  2503. &local_ram->drbBase,
  2504. (qdev->nvram_data.drbTableBaseHi << 16) |
  2505. qdev->nvram_data.drbTableBaseLo);
  2506. ql_write_page2_reg(qdev,
  2507. &local_ram->maxDrbCount,
  2508. qdev->nvram_data.drbTableSize);
  2509. ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
  2510. return 0;
  2511. }
  2512. static int ql_adapter_initialize(struct ql3_adapter *qdev)
  2513. {
  2514. u32 value;
  2515. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2516. struct ql3xxx_host_memory_registers __iomem *hmem_regs =
  2517. (void __iomem *)port_regs;
  2518. u32 delay = 10;
  2519. int status = 0;
  2520. if(ql_mii_setup(qdev))
  2521. return -1;
2522. /* Bring the PHY out of reset */
  2523. ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
  2524. (ISP_SERIAL_PORT_IF_WE |
  2525. (ISP_SERIAL_PORT_IF_WE << 16)));
  2526. qdev->port_link_state = LS_DOWN;
  2527. netif_carrier_off(qdev->ndev);
  2528. /* V2 chip fix for ARS-39168. */
  2529. ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
  2530. (ISP_SERIAL_PORT_IF_SDE |
  2531. (ISP_SERIAL_PORT_IF_SDE << 16)));
  2532. /* Request Queue Registers */
  2533. *((u32 *) (qdev->preq_consumer_index)) = 0;
  2534. atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
  2535. qdev->req_producer_index = 0;
  2536. ql_write_page1_reg(qdev,
  2537. &hmem_regs->reqConsumerIndexAddrHigh,
  2538. qdev->req_consumer_index_phy_addr_high);
  2539. ql_write_page1_reg(qdev,
  2540. &hmem_regs->reqConsumerIndexAddrLow,
  2541. qdev->req_consumer_index_phy_addr_low);
  2542. ql_write_page1_reg(qdev,
  2543. &hmem_regs->reqBaseAddrHigh,
  2544. MS_64BITS(qdev->req_q_phy_addr));
  2545. ql_write_page1_reg(qdev,
  2546. &hmem_regs->reqBaseAddrLow,
  2547. LS_64BITS(qdev->req_q_phy_addr));
  2548. ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
  2549. /* Response Queue Registers */
  2550. *((u16 *) (qdev->prsp_producer_index)) = 0;
  2551. qdev->rsp_consumer_index = 0;
  2552. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2553. ql_write_page1_reg(qdev,
  2554. &hmem_regs->rspProducerIndexAddrHigh,
  2555. qdev->rsp_producer_index_phy_addr_high);
  2556. ql_write_page1_reg(qdev,
  2557. &hmem_regs->rspProducerIndexAddrLow,
  2558. qdev->rsp_producer_index_phy_addr_low);
  2559. ql_write_page1_reg(qdev,
  2560. &hmem_regs->rspBaseAddrHigh,
  2561. MS_64BITS(qdev->rsp_q_phy_addr));
  2562. ql_write_page1_reg(qdev,
  2563. &hmem_regs->rspBaseAddrLow,
  2564. LS_64BITS(qdev->rsp_q_phy_addr));
  2565. ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
  2566. /* Large Buffer Queue */
  2567. ql_write_page1_reg(qdev,
  2568. &hmem_regs->rxLargeQBaseAddrHigh,
  2569. MS_64BITS(qdev->lrg_buf_q_phy_addr));
  2570. ql_write_page1_reg(qdev,
  2571. &hmem_regs->rxLargeQBaseAddrLow,
  2572. LS_64BITS(qdev->lrg_buf_q_phy_addr));
  2573. ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
  2574. ql_write_page1_reg(qdev,
  2575. &hmem_regs->rxLargeBufferLength,
  2576. qdev->lrg_buffer_len);
  2577. /* Small Buffer Queue */
  2578. ql_write_page1_reg(qdev,
  2579. &hmem_regs->rxSmallQBaseAddrHigh,
  2580. MS_64BITS(qdev->small_buf_q_phy_addr));
  2581. ql_write_page1_reg(qdev,
  2582. &hmem_regs->rxSmallQBaseAddrLow,
  2583. LS_64BITS(qdev->small_buf_q_phy_addr));
  2584. ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
  2585. ql_write_page1_reg(qdev,
  2586. &hmem_regs->rxSmallBufferLength,
  2587. QL_SMALL_BUFFER_SIZE);
  2588. qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
  2589. qdev->small_buf_release_cnt = 8;
  2590. qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
  2591. qdev->lrg_buf_release_cnt = 8;
  2592. qdev->lrg_buf_next_free =
  2593. (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
  2594. qdev->small_buf_index = 0;
  2595. qdev->lrg_buf_index = 0;
  2596. qdev->lrg_buf_free_count = 0;
  2597. qdev->lrg_buf_free_head = NULL;
  2598. qdev->lrg_buf_free_tail = NULL;
  2599. ql_write_common_reg(qdev,
  2600. &port_regs->CommonRegs.
  2601. rxSmallQProducerIndex,
  2602. qdev->small_buf_q_producer_index);
  2603. ql_write_common_reg(qdev,
  2604. &port_regs->CommonRegs.
  2605. rxLargeQProducerIndex,
  2606. qdev->lrg_buf_q_producer_index);
  2607. /*
  2608. * Find out if the chip has already been initialized. If it has, then
  2609. * we skip some of the initialization.
  2610. */
  2611. clear_bit(QL_LINK_MASTER, &qdev->flags);
  2612. value = ql_read_page0_reg(qdev, &port_regs->portStatus);
  2613. if ((value & PORT_STATUS_IC) == 0) {
  2614. /* Chip has not been configured yet, so let it rip. */
  2615. if(ql_init_misc_registers(qdev)) {
  2616. status = -1;
  2617. goto out;
  2618. }
  2619. if (qdev->mac_index)
  2620. ql_write_page0_reg(qdev,
  2621. &port_regs->mac1MaxFrameLengthReg,
  2622. qdev->max_frame_size);
  2623. else
  2624. ql_write_page0_reg(qdev,
  2625. &port_regs->mac0MaxFrameLengthReg,
  2626. qdev->max_frame_size);
  2627. value = qdev->nvram_data.tcpMaxWindowSize;
  2628. ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
  2629. value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
  2630. if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
  2631. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
  2632. * 2) << 13)) {
  2633. status = -1;
  2634. goto out;
  2635. }
  2636. ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
  2637. ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
  2638. (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
  2639. 16) | (INTERNAL_CHIP_SD |
  2640. INTERNAL_CHIP_WE)));
  2641. ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
  2642. }
  2643. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  2644. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  2645. 2) << 7)) {
  2646. status = -1;
  2647. goto out;
  2648. }
  2649. ql_init_scan_mode(qdev);
  2650. ql_get_phy_owner(qdev);
  2651. /* Load the MAC Configuration */
  2652. /* Program lower 32 bits of the MAC address */
  2653. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2654. (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
  2655. ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
  2656. ((qdev->ndev->dev_addr[2] << 24)
  2657. | (qdev->ndev->dev_addr[3] << 16)
  2658. | (qdev->ndev->dev_addr[4] << 8)
  2659. | qdev->ndev->dev_addr[5]));
  2660. /* Program top 16 bits of the MAC address */
  2661. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2662. ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
  2663. ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
  2664. ((qdev->ndev->dev_addr[0] << 8)
  2665. | qdev->ndev->dev_addr[1]));
  2666. /* Enable Primary MAC */
  2667. ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
  2668. ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
  2669. MAC_ADDR_INDIRECT_PTR_REG_PE));
  2670. /* Clear Primary and Secondary IP addresses */
  2671. ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
  2672. ((IP_ADDR_INDEX_REG_MASK << 16) |
  2673. (qdev->mac_index << 2)));
  2674. ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
  2675. ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
  2676. ((IP_ADDR_INDEX_REG_MASK << 16) |
  2677. ((qdev->mac_index << 2) + 1)));
  2678. ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
  2679. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  2680. /* Indicate Configuration Complete */
  2681. ql_write_page0_reg(qdev,
  2682. &port_regs->portControl,
  2683. ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
  2684. do {
  2685. value = ql_read_page0_reg(qdev, &port_regs->portStatus);
  2686. if (value & PORT_STATUS_IC)
  2687. break;
  2688. msleep(500);
  2689. } while (--delay);
  2690. if (delay == 0) {
  2691. printk(KERN_ERR PFX
  2692. "%s: Hw Initialization timeout.\n", qdev->ndev->name);
  2693. status = -1;
  2694. goto out;
  2695. }
  2696. /* Enable Ethernet Function */
  2697. if (qdev->device_id == QL3032_DEVICE_ID) {
  2698. value =
  2699. (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
  2700. QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
  2701. ql_write_page0_reg(qdev, &port_regs->functionControl,
  2702. ((value << 16) | value));
  2703. } else {
  2704. value =
  2705. (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
  2706. PORT_CONTROL_HH);
  2707. ql_write_page0_reg(qdev, &port_regs->portControl,
  2708. ((value << 16) | value));
  2709. }
  2710. out:
  2711. return status;
  2712. }
  2713. /*
  2714. * Caller holds hw_lock.
  2715. */
  2716. static int ql_adapter_reset(struct ql3_adapter *qdev)
  2717. {
  2718. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2719. int status = 0;
  2720. u16 value;
  2721. int max_wait_time;
  2722. set_bit(QL_RESET_ACTIVE, &qdev->flags);
  2723. clear_bit(QL_RESET_DONE, &qdev->flags);
  2724. /*
  2725. * Issue soft reset to chip.
  2726. */
  2727. printk(KERN_DEBUG PFX
  2728. "%s: Issue soft reset to chip.\n",
  2729. qdev->ndev->name);
  2730. ql_write_common_reg(qdev,
  2731. &port_regs->CommonRegs.ispControlStatus,
  2732. ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
2733. /* Wait up to 5 seconds for reset to complete. */
2734. printk(KERN_DEBUG PFX
2735. "%s: Waiting up to 5 seconds for reset to complete.\n",
  2736. qdev->ndev->name);
  2737. /* Wait until the firmware tells us the Soft Reset is done */
  2738. max_wait_time = 5;
  2739. do {
  2740. value =
  2741. ql_read_common_reg(qdev,
  2742. &port_regs->CommonRegs.ispControlStatus);
  2743. if ((value & ISP_CONTROL_SR) == 0)
  2744. break;
  2745. ssleep(1);
  2746. } while ((--max_wait_time));
  2747. /*
  2748. * Also, make sure that the Network Reset Interrupt bit has been
  2749. * cleared after the soft reset has taken place.
  2750. */
  2751. value =
  2752. ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
  2753. if (value & ISP_CONTROL_RI) {
  2754. printk(KERN_DEBUG PFX
  2755. "ql_adapter_reset: clearing RI after reset.\n");
  2756. ql_write_common_reg(qdev,
  2757. &port_regs->CommonRegs.
  2758. ispControlStatus,
  2759. ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
  2760. }
  2761. if (max_wait_time == 0) {
  2762. /* Issue Force Soft Reset */
  2763. ql_write_common_reg(qdev,
  2764. &port_regs->CommonRegs.
  2765. ispControlStatus,
  2766. ((ISP_CONTROL_FSR << 16) |
  2767. ISP_CONTROL_FSR));
  2768. /*
  2769. * Wait until the firmware tells us the Force Soft Reset is
  2770. * done
  2771. */
  2772. max_wait_time = 5;
  2773. do {
  2774. value =
  2775. ql_read_common_reg(qdev,
  2776. &port_regs->CommonRegs.
  2777. ispControlStatus);
  2778. if ((value & ISP_CONTROL_FSR) == 0) {
  2779. break;
  2780. }
  2781. ssleep(1);
  2782. } while ((--max_wait_time));
  2783. }
  2784. if (max_wait_time == 0)
  2785. status = 1;
  2786. clear_bit(QL_RESET_ACTIVE, &qdev->flags);
  2787. set_bit(QL_RESET_DONE, &qdev->flags);
  2788. return status;
  2789. }
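/*
 * Derive this port's identity from the function number latched in
 * ispControlStatus: MAC index, outbound IOCB opcodes, mailbox bit mask,
 * PHY address, and optical vs. copper media (PORT_STATUS_SMx).
 */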
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.numPorts;
}

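/*
 * Log adapter model, media type, PCI bus width/type, register base,
 * IRQ, and MAC address at probe time.
 */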
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
		       ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		       ndev->dev_addr[5]);
}

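/*
 * Quiesce the interface: stop the queue, drop carrier, release the IRQ
 * (and MSI), kill the watchdog timer, and optionally soft-reset the chip
 * under the driver lock before freeing memory resources.
 */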
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	netif_poll_disable(ndev);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}

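/*
 * Bring the interface up: allocate DMA resources, enable MSI if
 * requested, hook the interrupt, initialize the adapter under the
 * driver lock, then arm the watchdog timer and enable interrupts.
 */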
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize. Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		err = -ENODEV;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	netif_poll_enable(ndev);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

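/*
 * Bounce the adapter (down, optionally with a chip reset, then back up);
 * if either step fails the device is closed.
 */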
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		dev_close(qdev->ndev);
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}

static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
{
	struct ql3_adapter *qdev = netdev_priv(dev);
	return &qdev->stats;
}

static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*
	 * We are manually parsing the list in the net_device structure.
	 */
	return;
}

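/*
 * Program a new station address into the MAC address indirect registers.
 * Refused while the interface is running since the chip is live.
 */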
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}

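/*
 * Deferred reset handler: reclaim any in-flight TX skbs, clear the
 * Network Reset Interrupt, wait for the soft reset to finish, and then
 * cycle the adapter (with a full chip reset if the wait timed out).
 */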
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0], mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
						pci_unmap_addr(&tx_cb->map[j], mapaddr),
						pci_unmap_len(&tx_cb->map[j], maplen),
						PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to Complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->
						    CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

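/*
 * Cache chip revision, PCI bus width/mode, and slot number from
 * portStatus.
 */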
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

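/*
 * Periodic watchdog: drives the link state machine once per second
 * unless a reset is in progress.
 */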
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		printk(KERN_DEBUG PFX
		       "%s: Reset in progress.\n",
		       qdev->ndev->name);
		goto end;
	}

	ql_link_state_machine(qdev);

	/* Restart timer on 1 second interval. */
end:
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

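/*
 * PCI probe: enable the device, set the DMA mask, map the register BAR,
 * read NVRAM parameters, register the net_device, and set up the reset
 * and tx-timeout work plus the watchdog timer.
 */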
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);

	qdev->mem_map_registers =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	ndev->poll = &ql_poll;
	ndev->weight = 64;

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Turn off support for multicasting */
	ndev->flags &= ~IFF_MULTICAST;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x) {
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
	}

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

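/* PCI remove: tear down the netdev, workqueue, and PCI resources. */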
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);