qla3xxx.c

/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
/*
 * These are the known PHYs which are used.
 */
typedef enum {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et phyDevice;
	u32 phyIdOUI;
	u16 phyIdModel;
	char *name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
	{{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	 {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};
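
/*
 * Note on the semaphore register (an observation from the code below, not
 * from vendor documentation): the upper 16 bits of a write to semaphoreReg
 * act as a per-bit write mask while the lower bits carry the requested
 * ownership value, so a caller writes (sem_mask | sem_bits) and then reads
 * the register back to confirm it actually won the semaphore.
 */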
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
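
/*
 * The FM93C56A is a Microwire-style serial EEPROM; the helpers below
 * bit-bang its select/command/data protocol through the chip's
 * serialPortInterfaceReg, raising and lowering the clock bit around
 * each data bit. (Descriptive note added for readability; the exact
 * sequencing is as implemented below.)
 */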
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy. Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}
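
/*
 * NVRAM validity check (as implemented below, not from a spec): summing
 * all EEPROM_SIZE 16-bit words of the NVRAM image, including its stored
 * checksum word, must yield zero for the contents to be considered good.
 */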
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
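
/*
 * MII management access pattern used throughout this file (a summary of
 * the code that follows): poll macMIIStatusReg until the BSY bit clears
 * before and after each read/write. The wait below allows up to 1000
 * polls at 10us apiece, i.e. roughly 10ms, before giving up.
 */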
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff) {
		return result;
	}

	if (phyIdReg1 == 0xffff) {
		return result;
	}

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;

			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);

			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
		       qdev->ndev->name);
		return err;
	}
	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
		       qdev->ndev->name);
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using,
		   based on the index of the card */
		if (qdev->mac_index == 0) {
			miiAddr = MII_AGERE_ADDR_1;
		} else {
			miiAddr = MII_AGERE_ADDR_2;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}
	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
	}

	return 0;
}
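
/*
 * Note on the MAC config helpers below (derived from the code itself):
 * like the semaphore register, the MAC configuration registers treat the
 * upper 16 bits of a write as a per-bit write mask, so writing
 * (bit | (bit << 16)) sets a bit and writing (bit << 16) alone clears it
 * without disturbing the other configuration bits.
 */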
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration =
		    qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
		    qdev->nvram_data.macCfg_port1.portConfiguration;
	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;
  1221. /* Set the 1000 advertisements */
  1222. ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
  1223. PHYAddr[qdev->mac_index]);
  1224. reg &= ~PHY_GIG_ALL_PARAMS;
  1225. if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
  1226. if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
  1227. reg |= PHY_GIG_ADV_1000F;
  1228. else
  1229. reg |= PHY_GIG_ADV_1000H;
  1230. }
  1231. ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
  1232. PHYAddr[qdev->mac_index]);
  1233. /* Set the 10/100 & pause negotiation advertisements */
  1234. ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
  1235. PHYAddr[qdev->mac_index]);
  1236. reg &= ~PHY_NEG_ALL_PARAMS;
  1237. if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
  1238. reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
  1239. if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
  1240. if(portConfiguration & PORT_CONFIG_100MB_SPEED)
  1241. reg |= PHY_NEG_ADV_100F;
  1242. if(portConfiguration & PORT_CONFIG_10MB_SPEED)
  1243. reg |= PHY_NEG_ADV_10F;
  1244. }
  1245. if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
  1246. if(portConfiguration & PORT_CONFIG_100MB_SPEED)
  1247. reg |= PHY_NEG_ADV_100H;
  1248. if(portConfiguration & PORT_CONFIG_10MB_SPEED)
  1249. reg |= PHY_NEG_ADV_10H;
  1250. }
  1251. if(portConfiguration &
  1252. PORT_CONFIG_1000MB_SPEED) {
  1253. reg |= 1;
  1254. }
  1255. ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
  1256. PHYAddr[qdev->mac_index]);
  1257. ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
  1258. ql_mii_write_reg_ex(qdev, CONTROL_REG,
  1259. reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
  1260. PHYAddr[qdev->mac_index]);
  1261. }
  1262. static void ql_phy_init_ex(struct ql3_adapter *qdev)
  1263. {
  1264. ql_phy_reset_ex(qdev);
  1265. PHY_Setup(qdev);
  1266. ql_phy_start_neg_ex(qdev);
  1267. }
  1268. /*
  1269. * Caller holds hw_lock.
  1270. */
  1271. static u32 ql_get_link_state(struct ql3_adapter *qdev)
  1272. {
  1273. struct ql3xxx_port_registers __iomem *port_regs =
  1274. qdev->mem_map_registers;
  1275. u32 bitToCheck = 0;
  1276. u32 temp, linkState;
  1277. switch (qdev->mac_index) {
  1278. case 0:
  1279. bitToCheck = PORT_STATUS_UP0;
  1280. break;
  1281. case 1:
  1282. bitToCheck = PORT_STATUS_UP1;
  1283. break;
  1284. }
  1285. temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
  1286. if (temp & bitToCheck) {
  1287. linkState = LS_UP;
  1288. } else {
  1289. linkState = LS_DOWN;
  1290. }
  1291. return linkState;
  1292. }
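/*
 * Note: the expression (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 * 2) << 7 passed to ql_sem_spinlock() below builds this function's
 * resource code in the PHY/GIO semaphore field, so the two MAC functions
 * request ownership with distinct codes.
 */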
  1293. static int ql_port_start(struct ql3_adapter *qdev)
  1294. {
  1295. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1296. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1297. 2) << 7)) {
  1298. printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
  1299. qdev->ndev->name);
  1300. return -1;
  1301. }
  1302. if (ql_is_fiber(qdev)) {
  1303. ql_petbi_init(qdev);
  1304. } else {
  1305. /* Copper port */
  1306. ql_phy_init_ex(qdev);
  1307. }
  1308. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1309. return 0;
  1310. }
  1311. static int ql_finish_auto_neg(struct ql3_adapter *qdev)
  1312. {
  1313. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1314. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1315. 2) << 7))
  1316. return -1;
  1317. if (!ql_auto_neg_error(qdev)) {
  1318. if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
  1319. /* configure the MAC */
  1320. if (netif_msg_link(qdev))
  1321. printk(KERN_DEBUG PFX
  1322. "%s: Configuring link.\n",
  1323. qdev->ndev->
  1324. name);
  1325. ql_mac_cfg_soft_reset(qdev, 1);
  1326. ql_mac_cfg_gig(qdev,
  1327. (ql_get_link_speed
  1328. (qdev) ==
  1329. SPEED_1000));
  1330. ql_mac_cfg_full_dup(qdev,
  1331. ql_is_link_full_dup
  1332. (qdev));
  1333. ql_mac_cfg_pause(qdev,
  1334. ql_is_neg_pause
  1335. (qdev));
  1336. ql_mac_cfg_soft_reset(qdev, 0);
  1337. /* enable the MAC */
  1338. if (netif_msg_link(qdev))
  1339. printk(KERN_DEBUG PFX
  1340. "%s: Enabling mac.\n",
  1341. qdev->ndev->
  1342. name);
  1343. ql_mac_enable(qdev, 1);
  1344. }
  1345. qdev->port_link_state = LS_UP;
  1346. netif_start_queue(qdev->ndev);
  1347. netif_carrier_on(qdev->ndev);
  1348. if (netif_msg_link(qdev))
  1349. printk(KERN_INFO PFX
  1350. "%s: Link is up at %d Mbps, %s duplex.\n",
  1351. qdev->ndev->name,
  1352. ql_get_link_speed(qdev),
  1353. ql_is_link_full_dup(qdev)
  1354. ? "full" : "half");
  1355. } else { /* Remote error detected */
  1356. if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
  1357. if (netif_msg_link(qdev))
  1358. printk(KERN_DEBUG PFX
  1359. "%s: Remote error detected. "
  1360. "Calling ql_port_start().\n",
  1361. qdev->ndev->
  1362. name);
  1363. /*
  1364. * ql_port_start() is shared code and needs
1365. * to lock the PHY on its own.
  1366. */
  1367. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1368. if(ql_port_start(qdev)) {/* Restart port */
  1369. return -1;
  1370. } else
  1371. return 0;
  1372. }
  1373. }
  1374. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1375. return 0;
  1376. }
  1377. static void ql_link_state_machine_work(struct work_struct *work)
  1378. {
  1379. struct ql3_adapter *qdev =
  1380. container_of(work, struct ql3_adapter, link_state_work.work);
  1381. u32 curr_link_state;
  1382. unsigned long hw_flags;
  1383. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1384. curr_link_state = ql_get_link_state(qdev);
  1385. if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
  1386. if (netif_msg_link(qdev))
  1387. printk(KERN_INFO PFX
  1388. "%s: Reset in progress, skip processing link "
  1389. "state.\n", qdev->ndev->name);
  1390. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1391. /* Restart timer on 1 second interval. */
1392. mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
  1393. return;
  1394. }
  1395. switch (qdev->port_link_state) {
  1396. default:
  1397. if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
  1398. ql_port_start(qdev);
  1399. }
  1400. qdev->port_link_state = LS_DOWN;
  1401. /* Fall Through */
  1402. case LS_DOWN:
  1403. if (curr_link_state == LS_UP) {
  1404. if (netif_msg_link(qdev))
  1405. printk(KERN_INFO PFX "%s: Link is up.\n",
  1406. qdev->ndev->name);
  1407. if (ql_is_auto_neg_complete(qdev))
  1408. ql_finish_auto_neg(qdev);
  1409. if (qdev->port_link_state == LS_UP)
  1410. ql_link_down_detect_clear(qdev);
  1411. qdev->port_link_state = LS_UP;
  1412. }
  1413. break;
  1414. case LS_UP:
  1415. /*
  1416. * See if the link is currently down or went down and came
  1417. * back up
  1418. */
  1419. if (curr_link_state == LS_DOWN) {
  1420. if (netif_msg_link(qdev))
  1421. printk(KERN_INFO PFX "%s: Link is down.\n",
  1422. qdev->ndev->name);
  1423. qdev->port_link_state = LS_DOWN;
  1424. }
  1425. if (ql_link_down_detect(qdev))
  1426. qdev->port_link_state = LS_DOWN;
  1427. break;
  1428. }
  1429. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1430. /* Restart timer on 1 second interval. */
  1431. mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
  1432. }
  1433. /*
  1434. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1435. */
  1436. static void ql_get_phy_owner(struct ql3_adapter *qdev)
  1437. {
  1438. if (ql_this_adapter_controls_port(qdev))
  1439. set_bit(QL_LINK_MASTER,&qdev->flags);
  1440. else
  1441. clear_bit(QL_LINK_MASTER,&qdev->flags);
  1442. }
  1443. /*
  1444. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1445. */
  1446. static void ql_init_scan_mode(struct ql3_adapter *qdev)
  1447. {
  1448. ql_mii_enable_scan_mode(qdev);
  1449. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1450. if (ql_this_adapter_controls_port(qdev))
  1451. ql_petbi_init_ex(qdev);
  1452. } else {
  1453. if (ql_this_adapter_controls_port(qdev))
  1454. ql_phy_init_ex(qdev);
  1455. }
  1456. }
  1457. /*
  1458. * MII_Setup needs to be called before taking the PHY out of reset so that the
  1459. * management interface clock speed can be set properly. It would be better if
  1460. * we had a way to disable MDC until after the PHY is out of reset, but we
  1461. * don't have that capability.
  1462. */
  1463. static int ql_mii_setup(struct ql3_adapter *qdev)
  1464. {
  1465. u32 reg;
  1466. struct ql3xxx_port_registers __iomem *port_regs =
  1467. qdev->mem_map_registers;
  1468. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1469. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1470. 2) << 7))
  1471. return -1;
  1472. if (qdev->device_id == QL3032_DEVICE_ID)
  1473. ql_write_page0_reg(qdev,
  1474. &port_regs->macMIIMgmtControlReg, 0x0f00000);
  1475. /* Divide 125MHz clock by 28 to meet PHY timing requirements */
  1476. reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
  1477. ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
  1478. reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
  1479. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1480. return 0;
  1481. }
  1482. static u32 ql_supported_modes(struct ql3_adapter *qdev)
  1483. {
  1484. u32 supported;
  1485. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1486. supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
  1487. | SUPPORTED_Autoneg;
  1488. } else {
  1489. supported = SUPPORTED_10baseT_Half
  1490. | SUPPORTED_10baseT_Full
  1491. | SUPPORTED_100baseT_Half
  1492. | SUPPORTED_100baseT_Full
  1493. | SUPPORTED_1000baseT_Half
  1494. | SUPPORTED_1000baseT_Full
  1495. | SUPPORTED_Autoneg | SUPPORTED_TP;
  1496. }
  1497. return supported;
  1498. }
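/*
 * The three helpers below wrap single PHY queries for ethtool. Each takes
 * hw_lock and the PHY/GIO semaphore for the duration of one query and
 * returns 0 if the semaphore cannot be obtained.
 */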
  1499. static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
  1500. {
  1501. int status;
  1502. unsigned long hw_flags;
  1503. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1504. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1505. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1506. 2) << 7)) {
  1507. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1508. return 0;
  1509. }
  1510. status = ql_is_auto_cfg(qdev);
  1511. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1512. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1513. return status;
  1514. }
  1515. static u32 ql_get_speed(struct ql3_adapter *qdev)
  1516. {
  1517. u32 status;
  1518. unsigned long hw_flags;
  1519. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1520. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1521. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1522. 2) << 7)) {
  1523. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1524. return 0;
  1525. }
  1526. status = ql_get_link_speed(qdev);
  1527. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1528. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1529. return status;
  1530. }
  1531. static int ql_get_full_dup(struct ql3_adapter *qdev)
  1532. {
  1533. int status;
  1534. unsigned long hw_flags;
  1535. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1536. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1537. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1538. 2) << 7)) {
  1539. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1540. return 0;
  1541. }
  1542. status = ql_is_link_full_dup(qdev);
  1543. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1544. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1545. return status;
  1546. }
  1547. static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
  1548. {
  1549. struct ql3_adapter *qdev = netdev_priv(ndev);
  1550. ecmd->transceiver = XCVR_INTERNAL;
  1551. ecmd->supported = ql_supported_modes(qdev);
  1552. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1553. ecmd->port = PORT_FIBRE;
  1554. } else {
  1555. ecmd->port = PORT_TP;
  1556. ecmd->phy_address = qdev->PHYAddr;
  1557. }
  1558. ecmd->advertising = ql_supported_modes(qdev);
  1559. ecmd->autoneg = ql_get_auto_cfg_status(qdev);
  1560. ecmd->speed = ql_get_speed(qdev);
  1561. ecmd->duplex = ql_get_full_dup(qdev);
  1562. return 0;
  1563. }
  1564. static void ql_get_drvinfo(struct net_device *ndev,
  1565. struct ethtool_drvinfo *drvinfo)
  1566. {
  1567. struct ql3_adapter *qdev = netdev_priv(ndev);
  1568. strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
  1569. strncpy(drvinfo->version, ql3xxx_driver_version, 32);
  1570. strncpy(drvinfo->fw_version, "N/A", 32);
  1571. strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
  1572. drvinfo->regdump_len = 0;
  1573. drvinfo->eedump_len = 0;
  1574. }
  1575. static u32 ql_get_msglevel(struct net_device *ndev)
  1576. {
  1577. struct ql3_adapter *qdev = netdev_priv(ndev);
  1578. return qdev->msg_enable;
  1579. }
  1580. static void ql_set_msglevel(struct net_device *ndev, u32 value)
  1581. {
  1582. struct ql3_adapter *qdev = netdev_priv(ndev);
  1583. qdev->msg_enable = value;
  1584. }
  1585. static void ql_get_pauseparam(struct net_device *ndev,
  1586. struct ethtool_pauseparam *pause)
  1587. {
  1588. struct ql3_adapter *qdev = netdev_priv(ndev);
  1589. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1590. u32 reg;
  1591. if(qdev->mac_index == 0)
  1592. reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
  1593. else
  1594. reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
  1595. pause->autoneg = ql_get_auto_cfg_status(qdev);
  1596. pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
  1597. pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
  1598. }
  1599. static const struct ethtool_ops ql3xxx_ethtool_ops = {
  1600. .get_settings = ql_get_settings,
  1601. .get_drvinfo = ql_get_drvinfo,
  1602. .get_link = ethtool_op_get_link,
  1603. .get_msglevel = ql_get_msglevel,
  1604. .set_msglevel = ql_set_msglevel,
  1605. .get_pauseparam = ql_get_pauseparam,
  1606. };
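/*
 * Walk the large-buffer free list and retry allocation/mapping for any
 * control block whose earlier skb allocation failed. Returns 1 once
 * lrg_buf_skb_check drops to zero (all buffers replenished), 0 otherwise.
 */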
  1607. static int ql_populate_free_queue(struct ql3_adapter *qdev)
  1608. {
  1609. struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
  1610. dma_addr_t map;
  1611. int err;
  1612. while (lrg_buf_cb) {
  1613. if (!lrg_buf_cb->skb) {
  1614. lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
  1615. qdev->lrg_buffer_len);
  1616. if (unlikely(!lrg_buf_cb->skb)) {
  1617. printk(KERN_DEBUG PFX
  1618. "%s: Failed netdev_alloc_skb().\n",
  1619. qdev->ndev->name);
  1620. break;
  1621. } else {
  1622. /*
  1623. * We save some space to copy the ethhdr from
  1624. * first buffer
  1625. */
  1626. skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
  1627. map = pci_map_single(qdev->pdev,
  1628. lrg_buf_cb->skb->data,
  1629. qdev->lrg_buffer_len -
  1630. QL_HEADER_SPACE,
  1631. PCI_DMA_FROMDEVICE);
  1632. err = pci_dma_mapping_error(qdev->pdev, map);
  1633. if(err) {
  1634. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  1635. qdev->ndev->name, err);
  1636. dev_kfree_skb(lrg_buf_cb->skb);
  1637. lrg_buf_cb->skb = NULL;
  1638. break;
  1639. }
  1640. lrg_buf_cb->buf_phy_addr_low =
  1641. cpu_to_le32(LS_64BITS(map));
  1642. lrg_buf_cb->buf_phy_addr_high =
  1643. cpu_to_le32(MS_64BITS(map));
  1644. pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
  1645. pci_unmap_len_set(lrg_buf_cb, maplen,
  1646. qdev->lrg_buffer_len -
  1647. QL_HEADER_SPACE);
  1648. --qdev->lrg_buf_skb_check;
  1649. if (!qdev->lrg_buf_skb_check)
  1650. return 1;
  1651. }
  1652. }
  1653. lrg_buf_cb = lrg_buf_cb->next;
  1654. }
  1655. return 0;
  1656. }
  1657. /*
  1658. * Caller holds hw_lock.
  1659. */
  1660. static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
  1661. {
  1662. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1663. if (qdev->small_buf_release_cnt >= 16) {
  1664. while (qdev->small_buf_release_cnt >= 16) {
  1665. qdev->small_buf_q_producer_index++;
  1666. if (qdev->small_buf_q_producer_index ==
  1667. NUM_SBUFQ_ENTRIES)
  1668. qdev->small_buf_q_producer_index = 0;
  1669. qdev->small_buf_release_cnt -= 8;
  1670. }
  1671. wmb();
  1672. writel(qdev->small_buf_q_producer_index,
  1673. &port_regs->CommonRegs.rxSmallQProducerIndex);
  1674. }
  1675. }
  1676. /*
  1677. * Caller holds hw_lock.
  1678. */
  1679. static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
  1680. {
  1681. struct bufq_addr_element *lrg_buf_q_ele;
  1682. int i;
  1683. struct ql_rcv_buf_cb *lrg_buf_cb;
  1684. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1685. if ((qdev->lrg_buf_free_count >= 8)
  1686. && (qdev->lrg_buf_release_cnt >= 16)) {
  1687. if (qdev->lrg_buf_skb_check)
  1688. if (!ql_populate_free_queue(qdev))
  1689. return;
  1690. lrg_buf_q_ele = qdev->lrg_buf_next_free;
  1691. while ((qdev->lrg_buf_release_cnt >= 16)
  1692. && (qdev->lrg_buf_free_count >= 8)) {
  1693. for (i = 0; i < 8; i++) {
  1694. lrg_buf_cb =
  1695. ql_get_from_lrg_buf_free_list(qdev);
  1696. lrg_buf_q_ele->addr_high =
  1697. lrg_buf_cb->buf_phy_addr_high;
  1698. lrg_buf_q_ele->addr_low =
  1699. lrg_buf_cb->buf_phy_addr_low;
  1700. lrg_buf_q_ele++;
  1701. qdev->lrg_buf_release_cnt--;
  1702. }
  1703. qdev->lrg_buf_q_producer_index++;
  1704. if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
  1705. qdev->lrg_buf_q_producer_index = 0;
  1706. if (qdev->lrg_buf_q_producer_index ==
  1707. (qdev->num_lbufq_entries - 1)) {
  1708. lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
  1709. }
  1710. }
  1711. wmb();
  1712. qdev->lrg_buf_next_free = lrg_buf_q_ele;
  1713. writel(qdev->lrg_buf_q_producer_index,
  1714. &port_regs->CommonRegs.rxLargeQProducerIndex);
  1715. }
  1716. }
  1717. static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
  1718. struct ob_mac_iocb_rsp *mac_rsp)
  1719. {
  1720. struct ql_tx_buf_cb *tx_cb;
  1721. int i;
  1722. int retval = 0;
  1723. if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1724. printk(KERN_WARNING "Frame was short but was padded and sent.\n");
  1725. }
  1726. tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
  1727. /* Check the transmit response flags for any errors */
  1728. if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
  1729. printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
  1730. qdev->ndev->stats.tx_errors++;
  1731. retval = -EIO;
  1732. goto frame_not_sent;
  1733. }
  1734. if(tx_cb->seg_count == 0) {
  1735. printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
  1736. qdev->ndev->stats.tx_errors++;
  1737. retval = -EIO;
  1738. goto invalid_seg_count;
  1739. }
  1740. pci_unmap_single(qdev->pdev,
  1741. pci_unmap_addr(&tx_cb->map[0], mapaddr),
  1742. pci_unmap_len(&tx_cb->map[0], maplen),
  1743. PCI_DMA_TODEVICE);
  1744. tx_cb->seg_count--;
  1745. if (tx_cb->seg_count) {
  1746. for (i = 1; i < tx_cb->seg_count; i++) {
  1747. pci_unmap_page(qdev->pdev,
  1748. pci_unmap_addr(&tx_cb->map[i],
  1749. mapaddr),
  1750. pci_unmap_len(&tx_cb->map[i], maplen),
  1751. PCI_DMA_TODEVICE);
  1752. }
  1753. }
  1754. qdev->ndev->stats.tx_packets++;
  1755. qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
  1756. frame_not_sent:
  1757. dev_kfree_skb_irq(tx_cb->skb);
  1758. tx_cb->skb = NULL;
  1759. invalid_seg_count:
  1760. atomic_inc(&qdev->tx_count);
  1761. }
  1762. static void ql_get_sbuf(struct ql3_adapter *qdev)
  1763. {
  1764. if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
  1765. qdev->small_buf_index = 0;
  1766. qdev->small_buf_release_cnt++;
  1767. }
  1768. static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
  1769. {
  1770. struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
  1771. lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
  1772. qdev->lrg_buf_release_cnt++;
  1773. if (++qdev->lrg_buf_index == qdev->num_large_buffers)
  1774. qdev->lrg_buf_index = 0;
  1775. return(lrg_buf_cb);
  1776. }
  1777. /*
  1778. * The difference between 3022 and 3032 for inbound completions:
  1779. * 3022 uses two buffers per completion. The first buffer contains
  1780. * (some) header info, the second the remainder of the headers plus
  1781. * the data. For this chip we reserve some space at the top of the
  1782. * receive buffer so that the header info in buffer one can be
1783. * prepended to buffer two. Buffer two is then sent up while
  1784. * buffer one is returned to the hardware to be reused.
1785. * 3032 receives all of its data and headers in one buffer for a
  1786. * simpler process. 3032 also supports checksum verification as
  1787. * can be seen in ql_process_macip_rx_intr().
  1788. */
  1789. static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
  1790. struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
  1791. {
  1792. struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
  1793. struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
  1794. struct sk_buff *skb;
  1795. u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
  1796. /*
  1797. * Get the inbound address list (small buffer).
  1798. */
  1799. ql_get_sbuf(qdev);
  1800. if (qdev->device_id == QL3022_DEVICE_ID)
  1801. lrg_buf_cb1 = ql_get_lbuf(qdev);
  1802. /* start of second buffer */
  1803. lrg_buf_cb2 = ql_get_lbuf(qdev);
  1804. skb = lrg_buf_cb2->skb;
  1805. qdev->ndev->stats.rx_packets++;
  1806. qdev->ndev->stats.rx_bytes += length;
  1807. skb_put(skb, length);
  1808. pci_unmap_single(qdev->pdev,
  1809. pci_unmap_addr(lrg_buf_cb2, mapaddr),
  1810. pci_unmap_len(lrg_buf_cb2, maplen),
  1811. PCI_DMA_FROMDEVICE);
  1812. prefetch(skb->data);
  1813. skb->ip_summed = CHECKSUM_NONE;
  1814. skb->protocol = eth_type_trans(skb, qdev->ndev);
  1815. netif_receive_skb(skb);
  1816. lrg_buf_cb2->skb = NULL;
  1817. if (qdev->device_id == QL3022_DEVICE_ID)
  1818. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
  1819. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
  1820. }
  1821. static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
  1822. struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
  1823. {
  1824. struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
  1825. struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
  1826. struct sk_buff *skb1 = NULL, *skb2;
  1827. struct net_device *ndev = qdev->ndev;
  1828. u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
  1829. u16 size = 0;
  1830. /*
  1831. * Get the inbound address list (small buffer).
  1832. */
  1833. ql_get_sbuf(qdev);
  1834. if (qdev->device_id == QL3022_DEVICE_ID) {
  1835. /* start of first buffer on 3022 */
  1836. lrg_buf_cb1 = ql_get_lbuf(qdev);
  1837. skb1 = lrg_buf_cb1->skb;
  1838. size = ETH_HLEN;
  1839. if (*((u16 *) skb1->data) != 0xFFFF)
  1840. size += VLAN_ETH_HLEN - ETH_HLEN;
  1841. }
  1842. /* start of second buffer */
  1843. lrg_buf_cb2 = ql_get_lbuf(qdev);
  1844. skb2 = lrg_buf_cb2->skb;
  1845. skb_put(skb2, length); /* Just the second buffer length here. */
  1846. pci_unmap_single(qdev->pdev,
  1847. pci_unmap_addr(lrg_buf_cb2, mapaddr),
  1848. pci_unmap_len(lrg_buf_cb2, maplen),
  1849. PCI_DMA_FROMDEVICE);
  1850. prefetch(skb2->data);
  1851. skb2->ip_summed = CHECKSUM_NONE;
  1852. if (qdev->device_id == QL3022_DEVICE_ID) {
  1853. /*
  1854. * Copy the ethhdr from first buffer to second. This
  1855. * is necessary for 3022 IP completions.
  1856. */
  1857. skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
  1858. skb_push(skb2, size), size);
  1859. } else {
  1860. u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
  1861. if (checksum &
  1862. (IB_IP_IOCB_RSP_3032_ICE |
  1863. IB_IP_IOCB_RSP_3032_CE)) {
  1864. printk(KERN_ERR
  1865. "%s: Bad checksum for this %s packet, checksum = %x.\n",
  1866. __func__,
  1867. ((checksum &
  1868. IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
  1869. "UDP"),checksum);
  1870. } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
  1871. (checksum & IB_IP_IOCB_RSP_3032_UDP &&
  1872. !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
  1873. skb2->ip_summed = CHECKSUM_UNNECESSARY;
  1874. }
  1875. }
  1876. skb2->protocol = eth_type_trans(skb2, qdev->ndev);
  1877. netif_receive_skb(skb2);
  1878. ndev->stats.rx_packets++;
  1879. ndev->stats.rx_bytes += length;
  1880. lrg_buf_cb2->skb = NULL;
  1881. if (qdev->device_id == QL3022_DEVICE_ID)
  1882. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
  1883. ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
  1884. }
  1885. static int ql_tx_rx_clean(struct ql3_adapter *qdev,
  1886. int *tx_cleaned, int *rx_cleaned, int work_to_do)
  1887. {
  1888. struct net_rsp_iocb *net_rsp;
  1889. struct net_device *ndev = qdev->ndev;
  1890. int work_done = 0;
  1891. /* While there are entries in the completion queue. */
  1892. while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
  1893. qdev->rsp_consumer_index) && (work_done < work_to_do)) {
  1894. net_rsp = qdev->rsp_current;
  1895. rmb();
  1896. /*
1897. * Fix the 3032 chip's undocumented "feature" where bit-8 is set if the
  1898. * inbound completion is for a VLAN.
  1899. */
  1900. if (qdev->device_id == QL3032_DEVICE_ID)
  1901. net_rsp->opcode &= 0x7f;
  1902. switch (net_rsp->opcode) {
  1903. case OPCODE_OB_MAC_IOCB_FN0:
  1904. case OPCODE_OB_MAC_IOCB_FN2:
  1905. ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
  1906. net_rsp);
  1907. (*tx_cleaned)++;
  1908. break;
  1909. case OPCODE_IB_MAC_IOCB:
  1910. case OPCODE_IB_3032_MAC_IOCB:
  1911. ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
  1912. net_rsp);
  1913. (*rx_cleaned)++;
  1914. break;
  1915. case OPCODE_IB_IP_IOCB:
  1916. case OPCODE_IB_3032_IP_IOCB:
  1917. ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
  1918. net_rsp);
  1919. (*rx_cleaned)++;
  1920. break;
  1921. default:
  1922. {
  1923. u32 *tmp = (u32 *) net_rsp;
  1924. printk(KERN_ERR PFX
  1925. "%s: Hit default case, not "
  1926. "handled!\n"
  1927. " dropping the packet, opcode = "
  1928. "%x.\n",
  1929. ndev->name, net_rsp->opcode);
  1930. printk(KERN_ERR PFX
  1931. "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
  1932. (unsigned long int)tmp[0],
  1933. (unsigned long int)tmp[1],
  1934. (unsigned long int)tmp[2],
  1935. (unsigned long int)tmp[3]);
  1936. }
  1937. }
  1938. qdev->rsp_consumer_index++;
  1939. if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
  1940. qdev->rsp_consumer_index = 0;
  1941. qdev->rsp_current = qdev->rsp_q_virt_addr;
  1942. } else {
  1943. qdev->rsp_current++;
  1944. }
  1945. work_done = *tx_cleaned + *rx_cleaned;
  1946. }
  1947. return work_done;
  1948. }
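/*
 * Standard NAPI poll: clean completions up to the budget; when the whole
 * budget was not consumed, complete NAPI under hw_lock, push the buffer
 * queue producer and response consumer indices to the chip, and re-enable
 * interrupts.
 */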
  1949. static int ql_poll(struct napi_struct *napi, int budget)
  1950. {
  1951. struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
  1952. struct net_device *ndev = qdev->ndev;
  1953. int rx_cleaned = 0, tx_cleaned = 0;
  1954. unsigned long hw_flags;
  1955. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1956. ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
  1957. if (tx_cleaned + rx_cleaned != budget) {
  1958. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1959. __netif_rx_complete(ndev, napi);
  1960. ql_update_small_bufq_prod_index(qdev);
  1961. ql_update_lrg_bufq_prod_index(qdev);
  1962. writel(qdev->rsp_consumer_index,
  1963. &port_regs->CommonRegs.rspQConsumerIndex);
  1964. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1965. ql_enable_interrupts(qdev);
  1966. }
  1967. return tx_cleaned + rx_cleaned;
  1968. }
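/*
 * ISR dispatch: a fatal error or reset indication (ISP_CONTROL_FE/RI)
 * quiesces the interface and queues reset_work; a completion interrupt
 * disables further interrupts and schedules NAPI; anything else is not
 * ours (IRQ_NONE).
 */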
  1969. static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
  1970. {
  1971. struct net_device *ndev = dev_id;
  1972. struct ql3_adapter *qdev = netdev_priv(ndev);
  1973. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  1974. u32 value;
  1975. int handled = 1;
  1976. u32 var;
  1977. port_regs = qdev->mem_map_registers;
  1978. value =
  1979. ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
  1980. if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
  1981. spin_lock(&qdev->adapter_lock);
  1982. netif_stop_queue(qdev->ndev);
  1983. netif_carrier_off(qdev->ndev);
  1984. ql_disable_interrupts(qdev);
  1985. qdev->port_link_state = LS_DOWN;
  1986. set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
  1987. if (value & ISP_CONTROL_FE) {
  1988. /*
  1989. * Chip Fatal Error.
  1990. */
  1991. var =
  1992. ql_read_page0_reg_l(qdev,
  1993. &port_regs->PortFatalErrStatus);
  1994. printk(KERN_WARNING PFX
  1995. "%s: Resetting chip. PortFatalErrStatus "
  1996. "register = 0x%x\n", ndev->name, var);
  1997. set_bit(QL_RESET_START,&qdev->flags) ;
  1998. } else {
  1999. /*
  2000. * Soft Reset Requested.
  2001. */
  2002. set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
  2003. printk(KERN_ERR PFX
  2004. "%s: Another function issued a reset to the "
  2005. "chip. ISR value = %x.\n", ndev->name, value);
  2006. }
  2007. queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
  2008. spin_unlock(&qdev->adapter_lock);
  2009. } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
  2010. ql_disable_interrupts(qdev);
  2011. if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) {
  2012. __netif_rx_schedule(ndev, &qdev->napi);
  2013. }
  2014. } else {
  2015. return IRQ_NONE;
  2016. }
  2017. return IRQ_RETVAL(handled);
  2018. }
  2019. /*
  2020. * Get the total number of segments needed for the
  2021. * given number of fragments. This is necessary because
  2022. * outbound address lists (OAL) will be used when more than
  2023. * two frags are given. Each address list has 5 addr/len
2024. * pairs. The 5th pair in each OAL is used to point to
2025. * the next OAL if more frags are coming.
  2026. * That is why the frags:segment count ratio is not linear.
  2027. */
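/*
 * Worked example: for 3 frags, skb->data and frag 0 use the first two
 * IOCB address/length pairs, the third pair becomes a continuation entry
 * pointing at an OAL, and frags 1-2 land in that OAL: 4 data entries plus
 * 1 continuation entry = 5 segments, matching the table below.
 */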
  2028. static int ql_get_seg_count(struct ql3_adapter *qdev,
  2029. unsigned short frags)
  2030. {
  2031. if (qdev->device_id == QL3022_DEVICE_ID)
  2032. return 1;
  2033. switch(frags) {
  2034. case 0: return 1; /* just the skb->data seg */
  2035. case 1: return 2; /* skb->data + 1 frag */
  2036. case 2: return 3; /* skb->data + 2 frags */
2037. case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */
  2038. case 4: return 6;
  2039. case 5: return 7;
  2040. case 6: return 8;
  2041. case 7: return 10;
  2042. case 8: return 11;
  2043. case 9: return 12;
  2044. case 10: return 13;
  2045. case 11: return 15;
  2046. case 12: return 16;
  2047. case 13: return 17;
  2048. case 14: return 18;
  2049. case 15: return 20;
  2050. case 16: return 21;
  2051. case 17: return 22;
  2052. case 18: return 23;
  2053. }
  2054. return -1;
  2055. }
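/*
 * For a CHECKSUM_PARTIAL skb on the 3032, record the IP header offset and
 * length in the IOCB and request hardware TCP or UDP checksum insertion
 * along with the IP header checksum (the _IC flag).
 */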
  2056. static void ql_hw_csum_setup(const struct sk_buff *skb,
  2057. struct ob_mac_iocb_req *mac_iocb_ptr)
  2058. {
  2059. const struct iphdr *ip = ip_hdr(skb);
  2060. mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
  2061. mac_iocb_ptr->ip_hdr_len = ip->ihl;
  2062. if (ip->protocol == IPPROTO_TCP) {
  2063. mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
  2064. OB_3032MAC_IOCB_REQ_IC;
  2065. } else {
  2066. mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
  2067. OB_3032MAC_IOCB_REQ_IC;
  2068. }
  2069. }
  2070. /*
2071. * Map the buffers for this transmit. Returns NETDEV_TX_OK on
2072. * success or NETDEV_TX_BUSY on a mapping failure.
  2073. */
  2074. static int ql_send_map(struct ql3_adapter *qdev,
  2075. struct ob_mac_iocb_req *mac_iocb_ptr,
  2076. struct ql_tx_buf_cb *tx_cb,
  2077. struct sk_buff *skb)
  2078. {
  2079. struct oal *oal;
  2080. struct oal_entry *oal_entry;
  2081. int len = skb_headlen(skb);
  2082. dma_addr_t map;
  2083. int err;
  2084. int completed_segs, i;
  2085. int seg_cnt, seg = 0;
  2086. int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
  2087. seg_cnt = tx_cb->seg_count;
  2088. /*
  2089. * Map the skb buffer first.
  2090. */
  2091. map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
  2092. err = pci_dma_mapping_error(qdev->pdev, map);
  2093. if(err) {
  2094. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  2095. qdev->ndev->name, err);
  2096. return NETDEV_TX_BUSY;
  2097. }
  2098. oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
  2099. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  2100. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  2101. oal_entry->len = cpu_to_le32(len);
  2102. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
  2103. pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
  2104. seg++;
  2105. if (seg_cnt == 1) {
  2106. /* Terminate the last segment. */
  2107. oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
  2108. } else {
  2109. oal = tx_cb->oal;
  2110. for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
  2111. skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
  2112. oal_entry++;
  2113. if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
  2114. (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
  2115. (seg == 12 && seg_cnt > 13) || /* but necessary. */
  2116. (seg == 17 && seg_cnt > 18)) {
  2117. /* Continuation entry points to outbound address list. */
  2118. map = pci_map_single(qdev->pdev, oal,
  2119. sizeof(struct oal),
  2120. PCI_DMA_TODEVICE);
  2121. err = pci_dma_mapping_error(qdev->pdev, map);
  2122. if(err) {
  2123. printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
  2124. qdev->ndev->name, err);
  2125. goto map_error;
  2126. }
  2127. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  2128. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  2129. oal_entry->len =
  2130. cpu_to_le32(sizeof(struct oal) |
  2131. OAL_CONT_ENTRY);
  2132. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
  2133. map);
  2134. pci_unmap_len_set(&tx_cb->map[seg], maplen,
  2135. sizeof(struct oal));
  2136. oal_entry = (struct oal_entry *)oal;
  2137. oal++;
  2138. seg++;
  2139. }
  2140. map =
  2141. pci_map_page(qdev->pdev, frag->page,
  2142. frag->page_offset, frag->size,
  2143. PCI_DMA_TODEVICE);
  2144. err = pci_dma_mapping_error(qdev->pdev, map);
  2145. if(err) {
  2146. printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
  2147. qdev->ndev->name, err);
  2148. goto map_error;
  2149. }
  2150. oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
  2151. oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
  2152. oal_entry->len = cpu_to_le32(frag->size);
  2153. pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
  2154. pci_unmap_len_set(&tx_cb->map[seg], maplen,
  2155. frag->size);
  2156. }
  2157. /* Terminate the last segment. */
  2158. oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
  2159. }
  2160. return NETDEV_TX_OK;
  2161. map_error:
2162. /* A PCI mapping failed, so back out: traverse the OALs and
2163. * associated pages that have already been mapped and unmap them
2164. * to clean up properly
  2165. */
  2166. seg = 1;
  2167. oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
  2168. oal = tx_cb->oal;
  2169. for (i=0; i<completed_segs; i++,seg++) {
  2170. oal_entry++;
  2171. if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
  2172. (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
  2173. (seg == 12 && seg_cnt > 13) || /* but necessary. */
  2174. (seg == 17 && seg_cnt > 18)) {
  2175. pci_unmap_single(qdev->pdev,
  2176. pci_unmap_addr(&tx_cb->map[seg], mapaddr),
  2177. pci_unmap_len(&tx_cb->map[seg], maplen),
  2178. PCI_DMA_TODEVICE);
  2179. oal++;
  2180. seg++;
  2181. }
  2182. pci_unmap_page(qdev->pdev,
  2183. pci_unmap_addr(&tx_cb->map[seg], mapaddr),
  2184. pci_unmap_len(&tx_cb->map[seg], maplen),
  2185. PCI_DMA_TODEVICE);
  2186. }
  2187. pci_unmap_single(qdev->pdev,
  2188. pci_unmap_addr(&tx_cb->map[0], mapaddr),
2189. pci_unmap_len(&tx_cb->map[0], maplen),
  2190. PCI_DMA_TODEVICE);
  2191. return NETDEV_TX_BUSY;
  2192. }
  2193. /*
  2194. * The difference between 3022 and 3032 sends:
  2195. * 3022 only supports a simple single segment transmission.
  2196. * 3032 supports checksumming and scatter/gather lists (fragments).
  2197. * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
  2198. * in the IOCB plus a chain of outbound address lists (OAL) that
  2199. * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2200. * is used to point to an OAL when more ALP entries are required.
  2201. * The IOCB is always the top of the chain followed by one or more
  2202. * OALs (when necessary).
  2203. */
  2204. static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
  2205. {
  2206. struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
  2207. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2208. struct ql_tx_buf_cb *tx_cb;
  2209. u32 tot_len = skb->len;
  2210. struct ob_mac_iocb_req *mac_iocb_ptr;
  2211. if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
  2212. return NETDEV_TX_BUSY;
  2213. }
  2214. tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
  2215. if((tx_cb->seg_count = ql_get_seg_count(qdev,
  2216. (skb_shinfo(skb)->nr_frags))) == -1) {
  2217. printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
  2218. return NETDEV_TX_OK;
  2219. }
  2220. mac_iocb_ptr = tx_cb->queue_entry;
  2221. memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
  2222. mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
  2223. mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
  2224. mac_iocb_ptr->flags |= qdev->mb_bit_mask;
  2225. mac_iocb_ptr->transaction_id = qdev->req_producer_index;
  2226. mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
  2227. tx_cb->skb = skb;
  2228. if (qdev->device_id == QL3032_DEVICE_ID &&
  2229. skb->ip_summed == CHECKSUM_PARTIAL)
  2230. ql_hw_csum_setup(skb, mac_iocb_ptr);
  2231. if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
  2232. printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
  2233. return NETDEV_TX_BUSY;
  2234. }
  2235. wmb();
  2236. qdev->req_producer_index++;
  2237. if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
  2238. qdev->req_producer_index = 0;
  2239. wmb();
  2240. ql_write_common_reg_l(qdev,
  2241. &port_regs->CommonRegs.reqQProducerIndex,
  2242. qdev->req_producer_index);
  2243. ndev->trans_start = jiffies;
  2244. if (netif_msg_tx_queued(qdev))
  2245. printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
  2246. ndev->name, qdev->req_producer_index, skb->len);
  2247. atomic_dec(&qdev->tx_count);
  2248. return NETDEV_TX_OK;
  2249. }
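/*
 * Note: both queue allocations below must be naturally aligned to their
 * size; the check LS_64BITS(phy_addr) & (size - 1) rejects a DMA block
 * the hardware could not address as a ring.
 */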
  2250. static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
  2251. {
  2252. qdev->req_q_size =
  2253. (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
  2254. qdev->req_q_virt_addr =
  2255. pci_alloc_consistent(qdev->pdev,
  2256. (size_t) qdev->req_q_size,
  2257. &qdev->req_q_phy_addr);
  2258. if ((qdev->req_q_virt_addr == NULL) ||
  2259. LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
  2260. printk(KERN_ERR PFX "%s: reqQ failed.\n",
  2261. qdev->ndev->name);
  2262. return -ENOMEM;
  2263. }
  2264. qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
  2265. qdev->rsp_q_virt_addr =
  2266. pci_alloc_consistent(qdev->pdev,
  2267. (size_t) qdev->rsp_q_size,
  2268. &qdev->rsp_q_phy_addr);
  2269. if ((qdev->rsp_q_virt_addr == NULL) ||
  2270. LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
  2271. printk(KERN_ERR PFX
  2272. "%s: rspQ allocation failed\n",
  2273. qdev->ndev->name);
  2274. pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
  2275. qdev->req_q_virt_addr,
  2276. qdev->req_q_phy_addr);
  2277. return -ENOMEM;
  2278. }
  2279. set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
  2280. return 0;
  2281. }
  2282. static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
  2283. {
  2284. if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
  2285. printk(KERN_INFO PFX
  2286. "%s: Already done.\n", qdev->ndev->name);
  2287. return;
  2288. }
  2289. pci_free_consistent(qdev->pdev,
  2290. qdev->req_q_size,
  2291. qdev->req_q_virt_addr, qdev->req_q_phy_addr);
  2292. qdev->req_q_virt_addr = NULL;
  2293. pci_free_consistent(qdev->pdev,
  2294. qdev->rsp_q_size,
  2295. qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
  2296. qdev->rsp_q_virt_addr = NULL;
  2297. clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
  2298. }
  2299. static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
  2300. {
  2301. /* Create Large Buffer Queue */
  2302. qdev->lrg_buf_q_size =
  2303. qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
  2304. if (qdev->lrg_buf_q_size < PAGE_SIZE)
  2305. qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
  2306. else
  2307. qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
  2308. qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
  2309. if (qdev->lrg_buf == NULL) {
  2310. printk(KERN_ERR PFX
  2311. "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
  2312. return -ENOMEM;
  2313. }
  2314. qdev->lrg_buf_q_alloc_virt_addr =
  2315. pci_alloc_consistent(qdev->pdev,
  2316. qdev->lrg_buf_q_alloc_size,
  2317. &qdev->lrg_buf_q_alloc_phy_addr);
  2318. if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
  2319. printk(KERN_ERR PFX
  2320. "%s: lBufQ failed\n", qdev->ndev->name);
  2321. return -ENOMEM;
  2322. }
  2323. qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
  2324. qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
  2325. /* Create Small Buffer Queue */
  2326. qdev->small_buf_q_size =
  2327. NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
  2328. if (qdev->small_buf_q_size < PAGE_SIZE)
  2329. qdev->small_buf_q_alloc_size = PAGE_SIZE;
  2330. else
  2331. qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
  2332. qdev->small_buf_q_alloc_virt_addr =
  2333. pci_alloc_consistent(qdev->pdev,
  2334. qdev->small_buf_q_alloc_size,
  2335. &qdev->small_buf_q_alloc_phy_addr);
  2336. if (qdev->small_buf_q_alloc_virt_addr == NULL) {
  2337. printk(KERN_ERR PFX
  2338. "%s: Small Buffer Queue allocation failed.\n",
  2339. qdev->ndev->name);
  2340. pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
  2341. qdev->lrg_buf_q_alloc_virt_addr,
  2342. qdev->lrg_buf_q_alloc_phy_addr);
  2343. return -ENOMEM;
  2344. }
  2345. qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
  2346. qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
  2347. set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
  2348. return 0;
  2349. }
  2350. static void ql_free_buffer_queues(struct ql3_adapter *qdev)
  2351. {
  2352. if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
  2353. printk(KERN_INFO PFX
  2354. "%s: Already done.\n", qdev->ndev->name);
  2355. return;
  2356. }
2357. kfree(qdev->lrg_buf);
  2358. pci_free_consistent(qdev->pdev,
  2359. qdev->lrg_buf_q_alloc_size,
  2360. qdev->lrg_buf_q_alloc_virt_addr,
  2361. qdev->lrg_buf_q_alloc_phy_addr);
  2362. qdev->lrg_buf_q_virt_addr = NULL;
  2363. pci_free_consistent(qdev->pdev,
  2364. qdev->small_buf_q_alloc_size,
  2365. qdev->small_buf_q_alloc_virt_addr,
  2366. qdev->small_buf_q_alloc_phy_addr);
  2367. qdev->small_buf_q_virt_addr = NULL;
  2368. clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
  2369. }
  2370. static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
  2371. {
  2372. int i;
  2373. struct bufq_addr_element *small_buf_q_entry;
2374. /* Currently we allocate one chunk of memory and use it for small buffers */
  2375. qdev->small_buf_total_size =
  2376. (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
  2377. QL_SMALL_BUFFER_SIZE);
  2378. qdev->small_buf_virt_addr =
  2379. pci_alloc_consistent(qdev->pdev,
  2380. qdev->small_buf_total_size,
  2381. &qdev->small_buf_phy_addr);
  2382. if (qdev->small_buf_virt_addr == NULL) {
  2383. printk(KERN_ERR PFX
  2384. "%s: Failed to get small buffer memory.\n",
  2385. qdev->ndev->name);
  2386. return -ENOMEM;
  2387. }
  2388. qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
  2389. qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
  2390. small_buf_q_entry = qdev->small_buf_q_virt_addr;
  2391. /* Initialize the small buffer queue. */
  2392. for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
  2393. small_buf_q_entry->addr_high =
  2394. cpu_to_le32(qdev->small_buf_phy_addr_high);
  2395. small_buf_q_entry->addr_low =
  2396. cpu_to_le32(qdev->small_buf_phy_addr_low +
  2397. (i * QL_SMALL_BUFFER_SIZE));
  2398. small_buf_q_entry++;
  2399. }
  2400. qdev->small_buf_index = 0;
  2401. set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
  2402. return 0;
  2403. }
  2404. static void ql_free_small_buffers(struct ql3_adapter *qdev)
  2405. {
  2406. if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
  2407. printk(KERN_INFO PFX
  2408. "%s: Already done.\n", qdev->ndev->name);
  2409. return;
  2410. }
  2411. if (qdev->small_buf_virt_addr != NULL) {
  2412. pci_free_consistent(qdev->pdev,
  2413. qdev->small_buf_total_size,
  2414. qdev->small_buf_virt_addr,
  2415. qdev->small_buf_phy_addr);
  2416. qdev->small_buf_virt_addr = NULL;
  2417. }
  2418. }
  2419. static void ql_free_large_buffers(struct ql3_adapter *qdev)
  2420. {
  2421. int i = 0;
  2422. struct ql_rcv_buf_cb *lrg_buf_cb;
  2423. for (i = 0; i < qdev->num_large_buffers; i++) {
  2424. lrg_buf_cb = &qdev->lrg_buf[i];
  2425. if (lrg_buf_cb->skb) {
  2426. dev_kfree_skb(lrg_buf_cb->skb);
  2427. pci_unmap_single(qdev->pdev,
  2428. pci_unmap_addr(lrg_buf_cb, mapaddr),
  2429. pci_unmap_len(lrg_buf_cb, maplen),
  2430. PCI_DMA_FROMDEVICE);
  2431. memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
  2432. } else {
  2433. break;
  2434. }
  2435. }
  2436. }
  2437. static void ql_init_large_buffers(struct ql3_adapter *qdev)
  2438. {
  2439. int i;
  2440. struct ql_rcv_buf_cb *lrg_buf_cb;
  2441. struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
  2442. for (i = 0; i < qdev->num_large_buffers; i++) {
  2443. lrg_buf_cb = &qdev->lrg_buf[i];
  2444. buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
  2445. buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
  2446. buf_addr_ele++;
  2447. }
  2448. qdev->lrg_buf_index = 0;
  2449. qdev->lrg_buf_skb_check = 0;
  2450. }
  2451. static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
  2452. {
  2453. int i;
  2454. struct ql_rcv_buf_cb *lrg_buf_cb;
  2455. struct sk_buff *skb;
  2456. dma_addr_t map;
  2457. int err;
  2458. for (i = 0; i < qdev->num_large_buffers; i++) {
  2459. skb = netdev_alloc_skb(qdev->ndev,
  2460. qdev->lrg_buffer_len);
  2461. if (unlikely(!skb)) {
  2462. /* Better luck next round */
  2463. printk(KERN_ERR PFX
  2464. "%s: large buff alloc failed, "
  2465. "for %d bytes at index %d.\n",
  2466. qdev->ndev->name,
  2467. qdev->lrg_buffer_len * 2, i);
  2468. ql_free_large_buffers(qdev);
  2469. return -ENOMEM;
  2470. } else {
  2471. lrg_buf_cb = &qdev->lrg_buf[i];
  2472. memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
  2473. lrg_buf_cb->index = i;
  2474. lrg_buf_cb->skb = skb;
  2475. /*
  2476. * We save some space to copy the ethhdr from first
  2477. * buffer
  2478. */
  2479. skb_reserve(skb, QL_HEADER_SPACE);
  2480. map = pci_map_single(qdev->pdev,
  2481. skb->data,
  2482. qdev->lrg_buffer_len -
  2483. QL_HEADER_SPACE,
  2484. PCI_DMA_FROMDEVICE);
  2485. err = pci_dma_mapping_error(qdev->pdev, map);
  2486. if(err) {
  2487. printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
  2488. qdev->ndev->name, err);
  2489. ql_free_large_buffers(qdev);
  2490. return -ENOMEM;
  2491. }
  2492. pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
  2493. pci_unmap_len_set(lrg_buf_cb, maplen,
  2494. qdev->lrg_buffer_len -
  2495. QL_HEADER_SPACE);
  2496. lrg_buf_cb->buf_phy_addr_low =
  2497. cpu_to_le32(LS_64BITS(map));
  2498. lrg_buf_cb->buf_phy_addr_high =
  2499. cpu_to_le32(MS_64BITS(map));
  2500. }
  2501. }
  2502. return 0;
  2503. }
  2504. static void ql_free_send_free_list(struct ql3_adapter *qdev)
  2505. {
  2506. struct ql_tx_buf_cb *tx_cb;
  2507. int i;
  2508. tx_cb = &qdev->tx_buf[0];
  2509. for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
  2510. if (tx_cb->oal) {
  2511. kfree(tx_cb->oal);
  2512. tx_cb->oal = NULL;
  2513. }
  2514. tx_cb++;
  2515. }
  2516. }
  2517. static int ql_create_send_free_list(struct ql3_adapter *qdev)
  2518. {
  2519. struct ql_tx_buf_cb *tx_cb;
  2520. int i;
  2521. struct ob_mac_iocb_req *req_q_curr =
  2522. qdev->req_q_virt_addr;
  2523. /* Create free list of transmit buffers */
  2524. for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
  2525. tx_cb = &qdev->tx_buf[i];
  2526. tx_cb->skb = NULL;
  2527. tx_cb->queue_entry = req_q_curr;
  2528. req_q_curr++;
  2529. tx_cb->oal = kmalloc(512, GFP_KERNEL);
  2530. if (tx_cb->oal == NULL)
  2531. return -1;
  2532. }
  2533. return 0;
  2534. }
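/*
 * Top-level memory setup: size the large receive buffers from the MTU,
 * allocate one shared page for the request-consumer and response-producer
 * shadow registers, then build the queues; each failure unwinds the
 * allocations made before it via the error gotos.
 */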
  2535. static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
  2536. {
  2537. if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
  2538. qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
  2539. qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
  2540. }
  2541. else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
  2542. /*
2543. * Bigger buffers, so fewer of them.
  2544. */
  2545. qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
  2546. qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
  2547. } else {
  2548. printk(KERN_ERR PFX
  2549. "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
  2550. qdev->ndev->name);
  2551. return -ENOMEM;
  2552. }
  2553. qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
  2554. qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
  2555. qdev->max_frame_size =
  2556. (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
  2557. /*
  2558. * First allocate a page of shared memory and use it for shadow
  2559. * locations of Network Request Queue Consumer Address Register and
  2560. * Network Completion Queue Producer Index Register
  2561. */
  2562. qdev->shadow_reg_virt_addr =
  2563. pci_alloc_consistent(qdev->pdev,
  2564. PAGE_SIZE, &qdev->shadow_reg_phy_addr);
  2565. if (qdev->shadow_reg_virt_addr != NULL) {
  2566. qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
  2567. qdev->req_consumer_index_phy_addr_high =
  2568. MS_64BITS(qdev->shadow_reg_phy_addr);
  2569. qdev->req_consumer_index_phy_addr_low =
  2570. LS_64BITS(qdev->shadow_reg_phy_addr);
  2571. qdev->prsp_producer_index =
  2572. (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
  2573. qdev->rsp_producer_index_phy_addr_high =
  2574. qdev->req_consumer_index_phy_addr_high;
  2575. qdev->rsp_producer_index_phy_addr_low =
  2576. qdev->req_consumer_index_phy_addr_low + 8;
  2577. } else {
  2578. printk(KERN_ERR PFX
  2579. "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
  2580. return -ENOMEM;
  2581. }
  2582. if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
  2583. printk(KERN_ERR PFX
  2584. "%s: ql_alloc_net_req_rsp_queues failed.\n",
  2585. qdev->ndev->name);
  2586. goto err_req_rsp;
  2587. }
  2588. if (ql_alloc_buffer_queues(qdev) != 0) {
  2589. printk(KERN_ERR PFX
  2590. "%s: ql_alloc_buffer_queues failed.\n",
  2591. qdev->ndev->name);
  2592. goto err_buffer_queues;
  2593. }
  2594. if (ql_alloc_small_buffers(qdev) != 0) {
  2595. printk(KERN_ERR PFX
  2596. "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
  2597. goto err_small_buffers;
  2598. }
  2599. if (ql_alloc_large_buffers(qdev) != 0) {
  2600. printk(KERN_ERR PFX
  2601. "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
  2602. goto err_small_buffers;
  2603. }
  2604. /* Initialize the large buffer queue. */
  2605. ql_init_large_buffers(qdev);
  2606. if (ql_create_send_free_list(qdev))
  2607. goto err_free_list;
  2608. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2609. return 0;
  2610. err_free_list:
  2611. ql_free_send_free_list(qdev);
  2612. err_small_buffers:
  2613. ql_free_buffer_queues(qdev);
  2614. err_buffer_queues:
  2615. ql_free_net_req_rsp_queues(qdev);
  2616. err_req_rsp:
  2617. pci_free_consistent(qdev->pdev,
  2618. PAGE_SIZE,
  2619. qdev->shadow_reg_virt_addr,
  2620. qdev->shadow_reg_phy_addr);
  2621. return -ENOMEM;
  2622. }
  2623. static void ql_free_mem_resources(struct ql3_adapter *qdev)
  2624. {
  2625. ql_free_send_free_list(qdev);
  2626. ql_free_large_buffers(qdev);
  2627. ql_free_small_buffers(qdev);
  2628. ql_free_buffer_queues(qdev);
  2629. ql_free_net_req_rsp_queues(qdev);
  2630. if (qdev->shadow_reg_virt_addr != NULL) {
  2631. pci_free_consistent(qdev->pdev,
  2632. PAGE_SIZE,
  2633. qdev->shadow_reg_virt_addr,
  2634. qdev->shadow_reg_phy_addr);
  2635. qdev->shadow_reg_virt_addr = NULL;
  2636. }
  2637. }
  2638. static int ql_init_misc_registers(struct ql3_adapter *qdev)
  2639. {
  2640. struct ql3xxx_local_ram_registers __iomem *local_ram =
  2641. (void __iomem *)qdev->mem_map_registers;
  2642. if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
  2643. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  2644. 2) << 4))
  2645. return -1;
  2646. ql_write_page2_reg(qdev,
  2647. &local_ram->bufletSize, qdev->nvram_data.bufletSize);
  2648. ql_write_page2_reg(qdev,
  2649. &local_ram->maxBufletCount,
  2650. qdev->nvram_data.bufletCount);
  2651. ql_write_page2_reg(qdev,
  2652. &local_ram->freeBufletThresholdLow,
  2653. (qdev->nvram_data.tcpWindowThreshold25 << 16) |
  2654. (qdev->nvram_data.tcpWindowThreshold0));
  2655. ql_write_page2_reg(qdev,
  2656. &local_ram->freeBufletThresholdHigh,
  2657. qdev->nvram_data.tcpWindowThreshold50);
  2658. ql_write_page2_reg(qdev,
  2659. &local_ram->ipHashTableBase,
  2660. (qdev->nvram_data.ipHashTableBaseHi << 16) |
  2661. qdev->nvram_data.ipHashTableBaseLo);
  2662. ql_write_page2_reg(qdev,
  2663. &local_ram->ipHashTableCount,
  2664. qdev->nvram_data.ipHashTableSize);
  2665. ql_write_page2_reg(qdev,
  2666. &local_ram->tcpHashTableBase,
  2667. (qdev->nvram_data.tcpHashTableBaseHi << 16) |
  2668. qdev->nvram_data.tcpHashTableBaseLo);
  2669. ql_write_page2_reg(qdev,
  2670. &local_ram->tcpHashTableCount,
  2671. qdev->nvram_data.tcpHashTableSize);
  2672. ql_write_page2_reg(qdev,
  2673. &local_ram->ncbBase,
  2674. (qdev->nvram_data.ncbTableBaseHi << 16) |
  2675. qdev->nvram_data.ncbTableBaseLo);
  2676. ql_write_page2_reg(qdev,
  2677. &local_ram->maxNcbCount,
  2678. qdev->nvram_data.ncbTableSize);
  2679. ql_write_page2_reg(qdev,
  2680. &local_ram->drbBase,
  2681. (qdev->nvram_data.drbTableBaseHi << 16) |
  2682. qdev->nvram_data.drbTableBaseLo);
  2683. ql_write_page2_reg(qdev,
  2684. &local_ram->maxDrbCount,
  2685. qdev->nvram_data.drbTableSize);
  2686. ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
  2687. return 0;
  2688. }
  2689. static int ql_adapter_initialize(struct ql3_adapter *qdev)
  2690. {
  2691. u32 value;
  2692. struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
  2693. struct ql3xxx_host_memory_registers __iomem *hmem_regs =
  2694. (void __iomem *)port_regs;
  2695. u32 delay = 10;
  2696. int status = 0;
  2697. if(ql_mii_setup(qdev))
  2698. return -1;
2699. /* Bring the PHY out of reset */
  2700. ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
  2701. (ISP_SERIAL_PORT_IF_WE |
  2702. (ISP_SERIAL_PORT_IF_WE << 16)));
  2703. qdev->port_link_state = LS_DOWN;
  2704. netif_carrier_off(qdev->ndev);
  2705. /* V2 chip fix for ARS-39168. */
  2706. ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
  2707. (ISP_SERIAL_PORT_IF_SDE |
  2708. (ISP_SERIAL_PORT_IF_SDE << 16)));
  2709. /* Request Queue Registers */
  2710. *((u32 *) (qdev->preq_consumer_index)) = 0;
  2711. atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
  2712. qdev->req_producer_index = 0;
  2713. ql_write_page1_reg(qdev,
  2714. &hmem_regs->reqConsumerIndexAddrHigh,
  2715. qdev->req_consumer_index_phy_addr_high);
  2716. ql_write_page1_reg(qdev,
  2717. &hmem_regs->reqConsumerIndexAddrLow,
  2718. qdev->req_consumer_index_phy_addr_low);
  2719. ql_write_page1_reg(qdev,
  2720. &hmem_regs->reqBaseAddrHigh,
  2721. MS_64BITS(qdev->req_q_phy_addr));
  2722. ql_write_page1_reg(qdev,
  2723. &hmem_regs->reqBaseAddrLow,
  2724. LS_64BITS(qdev->req_q_phy_addr));
  2725. ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
  2726. /* Response Queue Registers */
  2727. *((__le16 *) (qdev->prsp_producer_index)) = 0;
  2728. qdev->rsp_consumer_index = 0;
  2729. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2730. ql_write_page1_reg(qdev,
  2731. &hmem_regs->rspProducerIndexAddrHigh,
  2732. qdev->rsp_producer_index_phy_addr_high);
  2733. ql_write_page1_reg(qdev,
  2734. &hmem_regs->rspProducerIndexAddrLow,
  2735. qdev->rsp_producer_index_phy_addr_low);
  2736. ql_write_page1_reg(qdev,
  2737. &hmem_regs->rspBaseAddrHigh,
  2738. MS_64BITS(qdev->rsp_q_phy_addr));
  2739. ql_write_page1_reg(qdev,
  2740. &hmem_regs->rspBaseAddrLow,
  2741. LS_64BITS(qdev->rsp_q_phy_addr));
  2742. ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
  2743. /* Large Buffer Queue */
  2744. ql_write_page1_reg(qdev,
  2745. &hmem_regs->rxLargeQBaseAddrHigh,
  2746. MS_64BITS(qdev->lrg_buf_q_phy_addr));
  2747. ql_write_page1_reg(qdev,
  2748. &hmem_regs->rxLargeQBaseAddrLow,
  2749. LS_64BITS(qdev->lrg_buf_q_phy_addr));
  2750. ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
  2751. ql_write_page1_reg(qdev,
  2752. &hmem_regs->rxLargeBufferLength,
  2753. qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);
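
	/*
	 * The receive buffer queues start out full, so point the producer
	 * indices at the last entry and reset the release counters and
	 * free-list bookkeeping.
	 */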
	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
		(struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has,
	 * then we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
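
	/*
	 * Poll portStatus for the initialization-complete bit, 500 ms per
	 * iteration for up to 10 iterations (5 seconds total).
	 */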
	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hardware initialization timed out.\n",
		       qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}

/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/*
	 * Wait until the firmware tells us the soft reset is done, polling
	 * once per second for up to 5 seconds.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Waiting up to 5 seconds for reset to complete.\n",
	       qdev->ndev->name);
	max_wait_time = 5;
	do {
		value = ql_read_common_reg(qdev,
					   &port_regs->CommonRegs.
					   ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;
		ssleep(1);
	} while (--max_wait_time);

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value = ql_read_common_reg(qdev,
				   &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done.
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while (--max_wait_time);
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
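
/*
 * Read ispControlStatus to learn which network function (0 or 1) this
 * instance is bound to, then cache the per-port IOCB opcode, mailbox
 * bit mask, PHY address and whether the link is optical or copper.
 */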
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d (%s, revision %d) found on PCI slot %d.\n",
	       DRV_NAME, qdev->index,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->chip_rev_id, qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %pM\n",
		       ndev->name, ndev->dev_addr);
}
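
/*
 * Quiesce the interface: stop the queue, drop the carrier, free the IRQ
 * (and MSI vector), stop the watchdog timer and NAPI, optionally soft
 * reset the chip under the driver lock, and free DMA resources.
 */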
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			soft_reset = ql_adapter_reset(qdev);
			if (soft_reset) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
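
/*
 * The inverse of ql_adapter_down(): allocate DMA resources, hook the
 * interrupt (MSI when requested and available), initialize the chip
 * under the driver lock, then start the watchdog timer, NAPI and
 * interrupts.
 */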
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize.  Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d; already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		err = ql_adapter_initialize(qdev);
		if (err) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		err = -ENODEV;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
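
/*
 * Bounce the adapter: take it down (optionally with a chip reset) and
 * bring it back up.  If either leg fails, close the device for good.
 */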
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}
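
/*
 * Set a new station address.  Refused while the interface is running;
 * otherwise the MAC address registers are reprogrammed under hw_lock,
 * mirroring the sequence in ql_adapter_initialize().
 */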
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24)
			    | (ndev->dev_addr[3] << 16)
			    | (ndev->dev_addr[4] << 8)
			    | ndev->dev_addr[5]));
	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
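
/*
 * Deferred reset handler.  Free any TX buffers that were in flight,
 * clear the Network Reset Interrupt, wait for the firmware's soft reset
 * to finish, then cycle the interface (no second chip reset is needed
 * on the success path).
 */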
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;

			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0],
						       mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					    pci_unmap_addr(&tx_cb->map[j],
							   mapaddr),
					    pci_unmap_len(&tx_cb->map[j],
							  maplen),
					    PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));

		/*
		 * Wait for the soft reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
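
/*
 * Cache the chip revision, PCI bus width (32/64-bit), PCI vs. PCI-X
 * mode and slot number for later reporting.
 */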
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_set_multicast_list	= NULL, /* not allowed on NIC side */
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};
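
/*
 * PCI probe: enable the device, set up DMA masks, map the register BAR,
 * validate the NVRAM contents, register the net device and arm the
 * adapter timer that drives the link-state machine.
 */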
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);
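
	/* Prefer 64-bit DMA when the platform supports it; otherwise fall
	 * back to a 32-bit mask before giving up. */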
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value.  We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16)0x0036);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
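
/*
 * PCI remove: unregister the net device, quiesce interrupts, cancel the
 * deferred work and destroy the workqueue, then unmap and release the
 * PCI resources.
 */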
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name		= DRV_NAME,
	.id_table	= ql3xxx_pci_tbl,
	.probe		= ql3xxx_probe,
	.remove		= __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);