/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.02.00-k36"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
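
/*
 * The semaphore register packs an ownership field in its low halfword
 * and a write-enable mask in its high halfword: writing (sem_mask |
 * sem_bits) only updates the bits selected by the mask.  Reading the
 * register back and comparing the masked value against sem_bits tells
 * us whether we actually won the semaphore or another function owns it.
 */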
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}
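
/*
 * Register access beyond the common block is paged.  The helpers below
 * select the page through ispControlStatus, cache the selection in
 * qdev->current_page, and only switch when a different page is needed.
 * The _l variants take hw_lock themselves; the others rely on the
 * caller holding it.
 */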
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
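
/*
 * Large receive buffers are recycled through a singly linked free list
 * threaded via lrg_buf_cb->next, with head/tail pointers so release is
 * O(1) at the tail and allocation is O(1) at the head.  If an earlier
 * skb allocation failed, release retries it here and bumps
 * lrg_buf_skb_check when the retry fails too.
 */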
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	u64 map;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
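
/*
 * The FM93C56A serial EEPROM is bit-banged through the serial port
 * interface register: fm93c56a_select() asserts chip select,
 * fm93c56a_cmd() shifts the command and address out one bit at a time
 * on DO with explicit clock rise/fall writes, and fm93c56a_datain()
 * samples DI after each falling edge to shift the data word back in.
 */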
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK |
				   qdev->eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_swap_mac_addr(u8 *macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}
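
/*
 * NVRAM contents are read out 16 bits at a time, so on big-endian
 * hosts the two bytes of each word land swapped.  ql_swap_mac_addr()
 * above restores byte order within each word of the MAC address, and
 * ql_get_nvram_params() below does the same for the version field via
 * le16_to_cpu().
 */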
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and the two 8-bit values version, and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *)&qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}
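
/*
 * ql_wait_for_mii_ready() polls the management interface busy bit at
 * 10us intervals, giving up after 1000 iterations (roughly 10ms).
 * Every MII read/write below brackets its register access with this
 * wait so back-to-back management operations don't collide.
 */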
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
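
/*
 * Each MII read above follows the same sequence: disable autoscan,
 * wait for the management port to go idle, latch the PHY/register
 * address, pulse the read-cycle bit (write-enable mask in the upper
 * halfword, value in the lower), wait for completion, then fetch the
 * data register.  Writes are the same minus the read-cycle pulse, and
 * autoscan is restored afterwards where it had been enabled.
 */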
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    mac_index);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    mac_index);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_petbi_reset_ex(qdev, mac_index);
	ql_petbi_start_neg_ex(qdev, mac_index);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	reg = (((reg & 0x18) >> 3) & 3);

	if (reg == 2)
		return SPEED_1000;
	else if (reg == 1)
		return SPEED_100;
	else if (reg == 0)
		return SPEED_10;
	else
		return -1;
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	return (reg & PHY_AUX_DUPLEX_STAT) != 0;
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}
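
/*
 * The link-down bits in ispControlStatus stay set once the link drops,
 * so a bounce is still visible even if the state machine samples after
 * the link has recovered.  Clearing uses the same convention as the
 * semaphore register: the bit in the low halfword paired with its
 * write-enable in the high halfword.
 */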
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
					 u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
			    PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
	ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
			    mac_index);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_phy_reset_ex(qdev, mac_index);
	ql_phy_start_neg_ex(qdev, mac_index);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev, qdev->mac_index);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev) ? "full" : "half");
	} else {	/* Remote error detected */
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
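
/*
 * ql_link_state_machine() samples the current link state under hw_lock
 * and advances qdev->port_link_state: from LS_DOWN it finishes
 * auto-negotiation and clears the latched link-down indication once
 * the link comes up; from LS_UP it falls back to LS_DOWN when the
 * link drops or a latched bounce is detected.  Any other state starts
 * the port (if this function is link master) and resets to LS_DOWN.
 */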
  1178. static void ql_link_state_machine(struct ql3_adapter *qdev)
  1179. {
  1180. u32 curr_link_state;
  1181. unsigned long hw_flags;
  1182. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  1183. curr_link_state = ql_get_link_state(qdev);
  1184. if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
  1185. if (netif_msg_link(qdev))
  1186. printk(KERN_INFO PFX
  1187. "%s: Reset in progress, skip processing link "
  1188. "state.\n", qdev->ndev->name);
  1189. return;
  1190. }
  1191. switch (qdev->port_link_state) {
  1192. default:
  1193. if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
  1194. ql_port_start(qdev);
  1195. }
  1196. qdev->port_link_state = LS_DOWN;
  1197. /* Fall Through */
  1198. case LS_DOWN:
  1199. if (netif_msg_link(qdev))
  1200. printk(KERN_DEBUG PFX
  1201. "%s: port_link_state = LS_DOWN.\n",
  1202. qdev->ndev->name);
  1203. if (curr_link_state == LS_UP) {
  1204. if (netif_msg_link(qdev))
  1205. printk(KERN_DEBUG PFX
  1206. "%s: curr_link_state = LS_UP.\n",
  1207. qdev->ndev->name);
  1208. if (ql_is_auto_neg_complete(qdev))
  1209. ql_finish_auto_neg(qdev);
  1210. if (qdev->port_link_state == LS_UP)
  1211. ql_link_down_detect_clear(qdev);
  1212. }
  1213. break;
  1214. case LS_UP:
  1215. /*
  1216. * See if the link is currently down or went down and came
  1217. * back up
  1218. */
  1219. if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
  1220. if (netif_msg_link(qdev))
  1221. printk(KERN_INFO PFX "%s: Link is down.\n",
  1222. qdev->ndev->name);
  1223. qdev->port_link_state = LS_DOWN;
  1224. }
  1225. break;
  1226. }
  1227. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  1228. }
  1229. /*
  1230. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1231. */
  1232. static void ql_get_phy_owner(struct ql3_adapter *qdev)
  1233. {
  1234. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1235. set_bit(QL_LINK_MASTER,&qdev->flags);
  1236. else
  1237. clear_bit(QL_LINK_MASTER,&qdev->flags);
  1238. }
  1239. /*
  1240. * Caller must take hw_lock and QL_PHY_GIO_SEM.
  1241. */
  1242. static void ql_init_scan_mode(struct ql3_adapter *qdev)
  1243. {
  1244. ql_mii_enable_scan_mode(qdev);
  1245. if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
  1246. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1247. ql_petbi_init_ex(qdev, qdev->mac_index);
  1248. } else {
  1249. if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
  1250. ql_phy_init_ex(qdev, qdev->mac_index);
  1251. }
  1252. }
  1253. /*
  1254. * MII_Setup needs to be called before taking the PHY out of reset so that the
  1255. * management interface clock speed can be set properly. It would be better if
  1256. * we had a way to disable MDC until after the PHY is out of reset, but we
  1257. * don't have that capability.
  1258. */
  1259. static int ql_mii_setup(struct ql3_adapter *qdev)
  1260. {
  1261. u32 reg;
  1262. struct ql3xxx_port_registers __iomem *port_regs =
  1263. qdev->mem_map_registers;
  1264. if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
  1265. (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
  1266. 2) << 7))
  1267. return -1;
  1268. if (qdev->device_id == QL3032_DEVICE_ID)
  1269. ql_write_page0_reg(qdev,
  1270. &port_regs->macMIIMgmtControlReg, 0x0f00000);
  1271. /* Divide 125MHz clock by 28 to meet PHY timing requirements */
  1272. reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
  1273. ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
  1274. reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
  1275. ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
  1276. return 0;
  1277. }

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		/* Don't leak hw_lock on the failure path. */
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		/* Don't leak hw_lock on the failure path. */
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		/* Don't leak hw_lock on the failure path. */
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	u64 map;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed dev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);
				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8)
	    && (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16)
		       && (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (NUM_LBUFQ_ENTRIES - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		qdev->lrg_buf_next_free = lrg_buf_q_ele;

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxLargeQProducerIndex,
				    qdev->lrg_buf_q_producer_index);
	}
}

static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;
	atomic_inc(&qdev->tx_count);
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up the stack
 * while buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
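/*
 * Rough sketch of the 3022 split described above (illustrative only;
 * the exact header boundary depends on the frame):
 *
 *   buffer 1:  [ header info ]                  -> recycled to hardware
 *   buffer 2:  [ reserved ][ rest of hdrs + data ]
 *                   ^ QL_HEADER_SPACE, filled by prepending buffer 1
 *
 * The 3032 simply delivers [ headers + data ] in a single buffer.
 */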
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;

	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer (3022 only) */
		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
		qdev->lrg_buf_release_cnt++;
		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
			qdev->lrg_buf_index = 0;
		}
		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
		curr_ial_ptr++;
	}

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];

	/*
	 * Second buffer gets sent up the stack.
	 */
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;
	skb = lrg_buf_cb2->skb;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->dev = qdev->ndev;
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	long int offset;
	u32 lrg_buf_phy_addr_low = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	u32 *curr_ial_ptr;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
	qdev->small_buf_release_cnt++;

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
		qdev->lrg_buf_release_cnt++;
		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
			qdev->lrg_buf_index = 0;
		skb1 = lrg_buf_cb1->skb;
		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
		curr_ial_ptr++;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
	lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
	skb2 = lrg_buf_cb2->skb;
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
		qdev->lrg_buf_index = 0;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE |
		     IB_IP_IOCB_RSP_3032_NUC)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum &
				 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"), checksum);
		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->dev = qdev->ndev;
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	unsigned long hw_flags;

	/* While there are entries in the completion queue. */
	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {

		net_rsp = qdev->rsp_current;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not "
				       "handled!\n"
				       " dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	ql_update_lrg_bufq_prod_index(qdev);

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}

		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    rxSmallQProducerIndex,
				    qdev->small_buf_q_producer_index);
	}

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rspQConsumerIndex,
			    qdev->rsp_consumer_index);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	if (unlikely(netif_queue_stopped(qdev->ndev))) {
		if (netif_queue_stopped(qdev->ndev) &&
		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
			netif_wake_queue(qdev->ndev);
	}

	return *tx_cleaned + *rx_cleaned;
}

static int ql_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;

	if (!netif_carrier_ok(ndev))
		goto quit_polling;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
quit_polling:
		netif_rx_complete(ndev);
		ql_enable_interrupts(qdev);
		return 0;
	}
	return 1;
}

static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev)))
			__netif_rx_schedule(ndev);
		else
			ql_enable_interrupts(qdev);
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}

/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
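/*
 * Worked example (follows from the table below): for 3 frags we need the
 * skb->data segment, the 3 frag segments, and one chaining entry pointing
 * at the OAL that holds the overflow frags -- 5 segments total.  The whole
 * table appears to reduce to the closed form
 *
 *     segs = 1 + frags + ceil((frags - 2) / 4)    (for frags >= 3)
 *
 * since one chaining entry is spent before the 2nd fragment and another
 * each time an OAL fills up and must point to the next one.
 */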
static int ql_get_seg_count(unsigned short frags)
{
	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	struct ethhdr *eth;
	struct iphdr *ip = NULL;
	u8 offset = ETH_HLEN;

	eth = (struct ethhdr *)(skb->data);

	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[ETH_HLEN];
	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
		   ((struct vlan_ethhdr *)skb->data)->
		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
		offset = VLAN_ETH_HLEN;
	}

	if (ip) {
		if (ip->protocol == IPPROTO_TCP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		} else if (ip->protocol == IPPROTO_UDP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		}
	}
}

/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
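/*
 * Shape of the chain described above (a sketch, not to scale):
 *
 *   IOCB:  [ ALP0 ][ ALP1 ][ ALP2 -> OAL ]
 *   OAL:   [ ALP ][ ALP ][ ALP ][ ALP ][ ALP -> next OAL ]
 *   ...
 *
 * The 3rd IOCB entry and the 5th entry of each OAL only become pointers
 * when more addr/len pairs are still needed; otherwise they carry data
 * like any other entry.
 */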
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	u64 map;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}
	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
	if (seg_cnt == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}
	mac_iocb_ptr = tx_cb->queue_entry;
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);
	len = skb_headlen(skb);
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (!skb_shinfo(skb)->nr_frags) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		int i;
		oal = tx_cb->oal;
		for (i = 0; i < frag_cnt; i++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			oal_entry++;
			/*
			 * Check for continuation requirements: the 3rd ALP
			 * of the IOCB and the 5th ALP of each OAL must be
			 * turned into a pointer to the next OAL when more
			 * entries are still needed.
			 */
			if ((seg == 2 && seg_cnt > 3) ||
			    (seg == 7 && seg_cnt > 8) ||
			    (seg == 12 && seg_cnt > 13) ||
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);
				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				/* Unmap length must match the OAL mapping
				 * just made, not the skb headlen. */
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map =
			    pci_map_page(qdev->pdev, frag->page,
					 frag->page_offset, frag->size,
					 PCI_DMA_TODEVICE);
			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}
	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}

static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    (LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1))) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    (LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1))) {
		printk(KERN_ERR PFX
		       "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}

static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: lBufQ failed\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}

static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and use it for all of
	 * the small buffers. */
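	/*
	 * Layout (follows from the loop below): the chunk holds
	 * QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES buffers of
	 * QL_SMALL_BUFFER_SIZE bytes each, and queue element i simply
	 * points at base + i * QL_SMALL_BUFFER_SIZE within it.
	 */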
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}

static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}

static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}

static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}

static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	u64 map;

	for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
		skb = dev_alloc_skb(qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed, "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}

static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		if (tx_cb->oal) {
			kfree(tx_cb->oal);
			tx_cb->oal = NULL;
		}
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr =
	    qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}

static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size.  Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
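	/*
	 * Resulting layout of the shadow page (derived from the assignments
	 * below): the request-queue consumer index lives at offset 0 and
	 * the response-queue producer index 8 bytes after it, so both
	 * shadow registers share this one DMA-coherent page.
	 */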
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);
	return -ENOMEM;
}

static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}

static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
	    (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((u16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has,
	 * then we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		if (qdev->mac_index)
			ql_write_page0_reg(qdev,
					   &port_regs->mac1MaxFrameLengthReg,
					   qdev->max_frame_size);
		else
			ql_write_page0_reg(qdev,
					   &port_regs->mac0MaxFrameLengthReg,
					   qdev->max_frame_size);

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		status = -1;
		goto out;
	}

	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value =
		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
		     PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}

/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Poll for up to 5 seconds for the reset to complete. */
	printk(KERN_DEBUG PFX
	       "%s: Wait up to 5 seconds for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value =
			    ql_read_common_reg(qdev,
					       &port_regs->CommonRegs.
					       ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0) {
				break;
			}
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}

static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.numPorts;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
		       ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		       ndev->dev_addr[5]);
}

static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	netif_poll_disable(ndev);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize. Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n",
			       qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d, already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		/* Fail with a real error code instead of returning 0. */
		err = -ENODEV;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	netif_poll_enable(ndev);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	/* hw_lock is still held on these paths; drop it before unwinding. */
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
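
/*
 * Bounce the adapter (down, optional reset, back up).  If either leg
 * fails, the device is closed, since its state can no longer be trusted.
 */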
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		dev_close(qdev->ndev);
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return ql_adapter_up(qdev);
}

static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
{
	struct ql3_adapter *qdev = netdev_priv(dev);

	return &qdev->stats;
}

static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);

	if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
		printk(KERN_ERR PFX
		       "%s: mtu size of %d is not valid. Use exactly %d or "
		       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
		       JUMBO_MTU_SIZE);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;

	/* The adapter must be cycled for the new MTU to take effect. */
	if (!netif_running(ndev))
		return 0;

	return ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*
	 * Intentionally a no-op: IFF_MULTICAST is cleared at probe time,
	 * so there is no multicast list to program into the hardware.
	 */
}
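
/*
 * The MAC address is programmed through an indirect register pair: a
 * write to macAddrIndirectPtrReg selects which half of the address is
 * being loaded (offset 0 for the lower 32 bits, 1 for the upper 16),
 * and a write to macAddrDataReg supplies the bytes.  Both writes are
 * done under hw_lock, and only while the interface is down.
 */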
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}
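
/*
 * The stack has detected a transmit hang.  Stop the queue immediately
 * and defer the actual recovery (a full adapter cycle) to the driver's
 * workqueue, since the recovery path sleeps.
 */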
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
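
/*
 * Deferred reset handler.  When a reset has been requested, free any
 * skbs still held in the transmit buffer ring, clear the "new reset"
 * indication, poll for up to ten seconds for the soft reset to
 * complete, and then cycle the adapter; if the reset never completes,
 * cycle it with a fresh hard reset instead.
 */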
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
	    container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	unsigned long hw_flags;

	/*
	 * test_bit() takes a single bit number; testing the OR of two bit
	 * numbers would test one wrong bit, so check each flag separately.
	 */
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;

			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
					pci_unmap_addr(&tx_cb->map[0], mapaddr),
					pci_unmap_len(&tx_cb->map[0], maplen),
					PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
						pci_unmap_addr(&tx_cb->map[j], mapaddr),
						pci_unmap_len(&tx_cb->map[j], maplen),
						PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the soft reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
	    container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
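
/*
 * Decode the portStatus register into cached board facts: chip revision
 * ID, 64- vs. 32-bit bus width, PCI-X vs. conventional PCI, plus the
 * PCI slot number from the device/function encoding.
 */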
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
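
/*
 * Periodic watchdog.  Runs the link state machine once a second unless
 * a reset is in progress, in which case it just re-arms itself.
 */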
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		printk(KERN_DEBUG PFX
		       "%s: Reset in progress.\n",
		       qdev->ndev->name);
		goto end;
	}

	ql_link_state_machine(qdev);

	/* Restart timer on one second interval. */
end:
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
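
/*
 * PCI probe: enable the device, claim its regions, pick a 64- or 32-bit
 * DMA mask, allocate and wire up the net_device, map the register BAR,
 * read the MAC address from NVRAM, register with the network stack, and
 * set up the recovery workqueue and watchdog timer.  Error paths unwind
 * in reverse order of acquisition.
 */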
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);

	qdev->mem_map_registers =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->change_mtu = ql3xxx_change_mtu;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	ndev->poll = &ql_poll;
	ndev->weight = 64;

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Turn off support for multicasting */
	ndev->flags &= ~IFF_MULTICAST;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x) {
		pci_write_config_word(pdev, 0x4e, (u16) 0x0036);
	}

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
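
/*
 * PCI remove: unregister the net_device first so no new activity can
 * start, then cancel and destroy the recovery workqueue and release the
 * register mapping, PCI regions, and the net_device itself.
 */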
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);