/* qlge_main.c */
/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
        u32 sem_bits = 0;

        switch (sem_mask) {
        case SEM_XGMAC0_MASK:
                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
                break;
        case SEM_XGMAC1_MASK:
                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
                break;
        case SEM_ICB_MASK:
                sem_bits = SEM_SET << SEM_ICB_SHIFT;
                break;
        case SEM_MAC_ADDR_MASK:
                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
                break;
        case SEM_FLASH_MASK:
                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
                break;
        case SEM_PROBE_MASK:
                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
                break;
        case SEM_RT_IDX_MASK:
                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
                break;
        case SEM_PROC_REG_MASK:
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
                QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
                return -EINVAL;
        }

        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
        unsigned int seconds = 3;

        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
                ssleep(1);
        } while (--seconds);
        return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);	/* flush */
}
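
/* Typical usage of the semaphore API (a sketch; this is the same pattern
 * ql_get_flash_params() below follows): acquire with ql_sem_spinlock(),
 * which retries for up to three seconds, touch the shared resource, then
 * release with ql_sem_unlock():
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */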
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used from process context by the
 * netdev->set_multi, netdev->set_mac_address and
 * netdev->vlan_rx_add_vid callbacks.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
        u32 temp;
        int count = UDELAY_COUNT;

        while (count) {
                temp = ql_read32(qdev, reg);

                /* check for errors */
                if (temp & err_bit) {
                        QPRINTK(qdev, PROBE, ALERT,
                                "register 0x%.08x access error, value = 0x%.08x!\n",
                                reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        QPRINTK(qdev, PROBE, ALERT,
                "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip.  This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
        int count = UDELAY_COUNT;
        u32 temp;

        while (count) {
                temp = ql_read32(qdev, CFG);
                if (temp & CFG_LE)
                        return -EIO;
                if (!(temp & bit))
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw.  Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
                 u16 q_id)
{
        u64 map;
        int status = 0;
        int direction;
        u32 mask;
        u32 value;

        direction =
            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
            PCI_DMA_FROMDEVICE;

        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }

        status = ql_wait_cfg(qdev, bit);
        if (status) {
                QPRINTK(qdev, IFUP, ERR,
                        "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }

        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
        if (status)
                goto exit;
        ql_write32(qdev, ICB_L, (u32) map);
        ql_write32(qdev, ICB_H, (u32) (map >> 32));
        ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

        mask = CFG_Q_MASK | (bit << 16);
        value = bit | (q_id << CFG_Q_SHIFT);
        ql_write32(qdev, CFG, (mask | value));

        /*
         * Wait for the bit to clear after signaling hw.
         */
        status = ql_wait_cfg(qdev, bit);
exit:
        pci_unmap_single(qdev->pdev, map, size, direction);
        return status;
}
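
/* Illustrative call (a sketch following this file's ring-bringup usage):
 * downloading a completion-queue init control block for a given rx ring
 * would look like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where the CFG_LCQ ("load completion queue") bit makes the block
 * DMA-map in the to-device direction.
 */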
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                        u32 *value)
{
        u32 offset = 0;
        int status;

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MR,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MR,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                status =
                                    ql_wait_reg_rdy(qdev,
                                                    MAC_ADDR_IDX, MAC_ADDR_MW,
                                                    MAC_ADDR_E);
                                if (status)
                                        goto exit;
                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                                status =
                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                    MAC_ADDR_MR, MAC_ADDR_E);
                                if (status)
                                        goto exit;
                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                               u16 index)
{
        u32 offset = 0;
        int status = 0;

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        u32 cam_output;
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower =
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);

                        QPRINTK(qdev, IFUP, INFO,
                                "Adding %s address %pM"
                                " at index %d in the CAM.\n",
                                ((type ==
                                  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
                                 "UNICAST"), addr, index);

                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);	/* type */
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);	/* type */
                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |  /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);	/* type */
                        /* This field should also include the queue id
                         * and possibly the function id.  Right now we hardcode
                         * the route field to NIC core.
                         */
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                cam_output = (CAM_OUT_ROUTE_NIC |
                                              (qdev->
                                               func << CAM_OUT_FUNC_SHIFT) |
                                              (qdev->
                                               rss_ring_first_cq_id <<
                                               CAM_OUT_CQ_ID_SHIFT));
                                if (qdev->vlgrp)
                                        cam_output |= CAM_OUT_RV;
                                /* route to NIC core */
                                ql_write32(qdev, MAC_ADDR_DATA, cam_output);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
                {
                        u32 enable_bit = *((u32 *) &addr[0]);
                        /* For VLAN, the addr actually holds a bit that
                         * either enables or disables the vlan id we are
                         * addressing.  It's either MAC_ADDR_E on or off.
                         * That's bit-27 we're talking about.
                         */
                        QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
                                (enable_bit ? "Adding" : "Removing"),
                                index, (enable_bit ? "to" : "from"));

                        status =
                            ql_wait_reg_rdy(qdev,
                                            MAC_ADDR_IDX, MAC_ADDR_MW,
                                            MAC_ADDR_E);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, offset |    /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type |	/* type */
                                   enable_bit);	/* enable/disable */
                        break;
                }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}
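
/* Example (a sketch mirroring the driver's set_mac_address path):
 * program the station address into this function's CAM slot so unicast
 * frames are matched and routed to us:
 *
 *	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
 *				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
 */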
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
        int status = 0;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return status;

        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
        if (status)
                goto exit;

        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
exit:
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
{
        int status;
        u32 value = 0;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return status;

        QPRINTK(qdev, IFUP, DEBUG,
                "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
                (enable ? "Adding" : "Removing"),
                ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
                ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
                ((index ==
                  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
                ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
                ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
                ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
                ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
                ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
                ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
                ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
                ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
                ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
                ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
                ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
                ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
                ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
                (enable ? "to" : "from"));

        switch (mask) {
        case RT_IDX_CAM_HIT:
                {
                        value = RT_IDX_DST_CAM_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_VALID:	/* Promiscuous Mode frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
                {
                        value = RT_IDX_DST_DFLT_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
                {
                        value = RT_IDX_DST_CAM_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
                {
                        value = RT_IDX_DST_CAM_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
                {
                        value = RT_IDX_DST_RSS |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        case 0:		/* Clear the E-bit on an entry. */
                {
                        value = RT_IDX_DST_DFLT_Q |	/* dest */
                            RT_IDX_TYPE_NICQ |	/* type */
                            (index << RT_IDX_IDX_SHIFT); /* index */
                        break;
                }
        default:
                QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
                        mask);
                status = -EPERM;
                goto exit;
        }

        if (value) {
                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
                if (status)
                        goto exit;
                value |= (enable ? RT_IDX_E : 0);
                ql_write32(qdev, RT_IDX, value);
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
exit:
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
        return status;
}
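
/* Example (a sketch following the route-initialization logic): enable
 * the broadcast slot so broadcast frames reach the default queue, and
 * later clear the same slot's E-bit to stop them again:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *	...
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, 0, 0);
 */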
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes.  In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags = 0;
        struct intr_context *ctx = qdev->intr_context + intr;

        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
                /* Always enable if we're MSIX multi interrupts and
                 * it's not the default (zeroth) interrupt.
                 */
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
                return var;
        }

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (atomic_dec_and_test(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags;
        struct intr_context *ctx;

        /* HW disables for us if we're MSIX multi interrupts and
         * it's not the default (zeroth) interrupt.
         */
        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
                return 0;

        ctx = qdev->intr_context + intr;
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}
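
/* The two calls above are used as a matched pair: the interrupt handler
 * calls ql_disable_completion_interrupt() before deferring work, which
 * bumps irq_cnt, and each worker calls ql_enable_completion_interrupt()
 * when it finishes.  Because the enable path only writes intr_en_mask
 * once atomic_dec_and_test() brings irq_cnt back to zero, overlapping
 * workers cannot re-enable the interrupt prematurely.
 */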
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
        int i;

        for (i = 0; i < qdev->intr_count; i++) {
                /* The enable call does an atomic_dec_and_test
                 * and enables only if the result is zero.
                 * So we precharge it here.
                 */
                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
                             i == 0))
                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
                ql_enable_completion_interrupt(qdev, i);
        }
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, FLASH_DATA);
exit:
        return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
        int i;
        int status;
        u32 *p = (u32 *)&qdev->flash;

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
                status = ql_read_flash_word(qdev, i, p);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
                        goto exit;
                }
        }
exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}
/* XGMAC registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
        int status;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                return status;
        /* write the data to the data reg */
        ql_write32(qdev, XGMAC_DATA, data);
        /* trigger the write */
        ql_write32(qdev, XGMAC_ADDR, reg);
        return status;
}

/* XGMAC registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, XGMAC_DATA);
exit:
        return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        int status = 0;
        u32 hi = 0;
        u32 lo = 0;

        status = ql_read_xgmac_reg(qdev, reg, &lo);
        if (status)
                goto exit;

        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
        if (status)
                goto exit;

        *data = (u64) lo | ((u64) hi << 32);
exit:
        return status;
}
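
/* Sketch of a 64-bit statistics read (the 0x200 offset is illustrative
 * only): the low 32 bits live at the given offset and the high 32 bits
 * at offset + 4, so
 *
 *	u64 tx_pkts;
 *	status = ql_read_xgmac_reg64(qdev, 0x200, &tx_pkts);
 *
 * yields the combined counter.  Callers are expected to hold the XGMAC
 * semaphore (qdev->xg_sem_mask) around these accesses.
 */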
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
        int status = 0;
        u32 data;

        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
                QPRINTK(qdev, LINK, INFO,
                        "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
                        QPRINTK(qdev, LINK, CRIT,
                                "Port initialize timed out.\n");
                }
                return status;
        }

        QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
                goto end;
        data |= GLOBAL_CFG_RESET;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Clear the core reset and turn on jumbo for receiver. */
        data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
        data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
        data |= GLOBAL_CFG_TX_STAT_EN;
        data |= GLOBAL_CFG_RX_STAT_EN;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Enable transmitter, and clear its reset. */
        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
        if (status)
                goto end;
        data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
        data |= TX_CFG_EN;	/* Enable the transmitter. */
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        if (status)
                goto end;

        /* Enable receiver and clear its reset. */
        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
        if (status)
                goto end;
        data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
        data |= RX_CFG_EN;	/* Enable the receiver. */
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
        if (status)
                goto end;

        /* Turn on jumbo. */
        status =
            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
                               MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        if (status)
                goto end;

        status =
            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
        if (status)
                goto end;

        /* Signal to the world that the port is enabled. */
        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}
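
/* Note: the 0x2580 programmed into MAC_TX_PARAMS/MAC_RX_PARAMS above is
 * 9600 decimal, i.e. the maximum (jumbo) frame size in bytes that the
 * MAC is being configured to transmit and accept.
 */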
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
                rx_ring->sbq_curr_idx = 0;
        rx_ring->sbq_free_cnt++;
        return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
        rx_ring->cnsmr_idx++;
        rx_ring->curr_entry++;
        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
                rx_ring->cnsmr_idx = 0;
                rx_ring->curr_entry = rx_ring->cq_base;
        }
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
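
/* Writing the consumer index to its doorbell register tells the chip how
 * far software has processed the completion queue, so it knows which
 * entries it may reuse.  The {l,s}bq_free_cnt increments done in the
 * ql_get_curr_*buf() helpers above are what later trigger the refill
 * work in ql_update_lbq()/ql_update_sbq() below.
 */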
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        int clean_idx = rx_ring->lbq_clean_idx;
        struct bq_desc *lbq_desc;
        struct bq_element *bq;
        u64 map;
        int i;

        while (rx_ring->lbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "lbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        bq = lbq_desc->bq;
                        if (lbq_desc->p.lbq_page == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "lbq: getting new page for index %d.\n",
                                        lbq_desc->index);
                                lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
                                if (lbq_desc->p.lbq_page == NULL) {
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "Couldn't get a page.\n");
                                        rx_ring->lbq_clean_idx = clean_idx;
                                        return;
                                }
                                map = pci_map_page(qdev->pdev,
                                                   lbq_desc->p.lbq_page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        QPRINTK(qdev, RX_STATUS, ERR,
                                                "PCI mapping failed.\n");
                                        put_page(lbq_desc->p.lbq_page);
                                        lbq_desc->p.lbq_page = NULL;
                                        rx_ring->lbq_clean_idx = clean_idx;
                                        return;
                                }
                                pci_unmap_addr_set(lbq_desc, mapaddr, map);
                                pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
                                bq->addr_lo =	/*lbq_desc->addr_lo = */
                                    cpu_to_le32(map);
                                bq->addr_hi =	/*lbq_desc->addr_hi = */
                                    cpu_to_le32(map >> 32);
                        }
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
                }
                rx_ring->lbq_clean_idx = clean_idx;
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "lbq: updating prod idx = %d.\n",
                        rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
                rx_ring->lbq_free_cnt -= 16;
        }
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        int clean_idx = rx_ring->sbq_clean_idx;
        struct bq_desc *sbq_desc;
        struct bq_element *bq;
        u64 map;
        int i;

        while (rx_ring->sbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "sbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        bq = sbq_desc->bq;
                        if (sbq_desc->p.skb == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "sbq: getting new skb for index %d.\n",
                                        sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     rx_ring->sbq_buf_size);
                                if (sbq_desc->p.skb == NULL) {
                                        QPRINTK(qdev, PROBE, ERR,
                                                "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size /
                                                     2, PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        QPRINTK(qdev, IFUP, ERR,
                                                "PCI mapping failed.\n");
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
                                pci_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size / 2);
                                bq->addr_lo = cpu_to_le32(map);
                                bq->addr_hi = cpu_to_le32(map >> 32);
                        }
                        clean_idx++;
                        if (clean_idx == rx_ring->sbq_len)
                                clean_idx = 0;
                }
                rx_ring->sbq_clean_idx = clean_idx;
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "sbq: updating prod idx = %d.\n",
                        rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
                rx_ring->sbq_free_cnt -= 16;
        }
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
}
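
/* ql_update_buffer_queues() runs as received buffers are consumed in the
 * rx completion (softirq) path, which is why the refill routines above
 * allocate with GFP_ATOMIC/netdev_alloc_skb() and, when memory is tight,
 * save their progress in *_clean_idx and bail out rather than sleep.
 */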
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
                          struct tx_ring_desc *tx_ring_desc, int mapped)
{
        int i;
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                        /*
                         * Unmap the skb->data area, or the
                         * external sglist (AKA the Outbound
                         * Address List (OAL)).
                         * If it's the zeroth element, then it's
                         * the skb->data area.  If it's the 7th
                         * element and there are more than 6 frags,
                         * then it's an OAL.
                         */
                        if (i == 7) {
                                QPRINTK(qdev, TX_DONE, DEBUG,
                                        "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         pci_unmap_addr(&tx_ring_desc->map[i],
                                                        mapaddr),
                                         pci_unmap_len(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
                        QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
                                i);
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
                                       pci_unmap_len(&tx_ring_desc->map[i],
                                                     maplen), PCI_DMA_TODEVICE);
                }
        }
}
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
                       struct ob_mac_iocb_req *mac_iocb_ptr,
                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
        int len = skb_headlen(skb);
        dma_addr_t map;
        int frag_idx, err, map_idx = 0;
        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
        int frag_cnt = skb_shinfo(skb)->nr_frags;

        if (frag_cnt) {
                QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
        }
        /*
         * Map the skb buffer first.
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

        err = pci_dma_mapping_error(qdev->pdev, map);
        if (err) {
                QPRINTK(qdev, TX_QUEUED, ERR,
                        "PCI mapping failed with error: %d\n", err);
                return NETDEV_TX_BUSY;
        }

        tbd->len = cpu_to_le32(len);
        tbd->addr = cpu_to_le64(map);
        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
        map_idx++;

        /*
         * This loop fills the remainder of the 8 address descriptors
         * in the IOCB.  If there are more than 7 fragments, then the
         * eighth address desc will point to an external list (OAL).
         * When this happens, the remainder of the frags will be stored
         * in this list.
         */
        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
                tbd++;
                if (frag_idx == 6 && frag_cnt > 7) {
                        /* Let's tack on an sglist.
                         * Our control block will now
                         * look like this:
                         * iocb->seg[0] = skb->data
                         * iocb->seg[1] = frag[0]
                         * iocb->seg[2] = frag[1]
                         * iocb->seg[3] = frag[2]
                         * iocb->seg[4] = frag[3]
                         * iocb->seg[5] = frag[4]
                         * iocb->seg[6] = frag[5]
                         * iocb->seg[7] = ptr to OAL (external sglist)
                         * oal->seg[0] = frag[6]
                         * oal->seg[1] = frag[7]
                         * oal->seg[2] = frag[8]
                         * oal->seg[3] = frag[9]
                         * oal->seg[4] = frag[10]
                         *      etc...
                         */
                        /* Tack on the OAL in the eighth segment of IOCB. */
                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
                                             sizeof(struct oal),
                                             PCI_DMA_TODEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                QPRINTK(qdev, TX_QUEUED, ERR,
                                        "PCI mapping outbound address list with error: %d\n",
                                        err);
                                goto map_error;
                        }

                        tbd->addr = cpu_to_le64(map);
                        /*
                         * The length is the number of fragments
                         * that remain to be mapped times the length
                         * of our sglist (OAL).
                         */
                        tbd->len =
                            cpu_to_le32((sizeof(struct tx_buf_desc) *
                                         (frag_cnt - frag_idx)) | TX_DESC_C);
                        pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
                                           map);
                        pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                          sizeof(struct oal));
                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
                        map_idx++;
                }

                map =
                    pci_map_page(qdev->pdev, frag->page,
                                 frag->page_offset, frag->size,
                                 PCI_DMA_TODEVICE);

                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
                        QPRINTK(qdev, TX_QUEUED, ERR,
                                "PCI mapping frags failed with error: %d.\n",
                                err);
                        goto map_error;
                }

                tbd->addr = cpu_to_le64(map);
                tbd->len = cpu_to_le32(frag->size);
                pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
                pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                  frag->size);
        }
        /* Save the number of segments we've mapped. */
        tx_ring_desc->map_cnt = map_idx;
        /* Terminate the last segment. */
        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
        return NETDEV_TX_OK;

map_error:
        /*
         * If the first frag mapping failed, then map_idx will be 1,
         * so only the skb->data area gets unmapped.  Otherwise we
         * pass in the number of segments that mapped successfully
         * so they can be unmapped.
         */
        ql_unmap_send(qdev, tx_ring_desc, map_idx);
        return NETDEV_TX_BUSY;
}
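
/* Small-buffer skbs were given QLGE_SB_PAD (32 bytes) of headroom before
 * being posted to the hardware; ql_realign_skb() below trades that for
 * the usual NET_IP_ALIGN (2-byte) offset so the IP header ends up
 * naturally aligned in the received frame.
 */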
static void ql_realign_skb(struct sk_buff *skb, int len)
{
        void *temp_addr = skb->data;

        /* Undo the skb_reserve(skb, 32) we did before
         * giving to hardware, and realign data on
         * a 2-byte boundary.
         */
        skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
        skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
        skb_copy_to_linear_data(skb, temp_addr,
                                (unsigned int)len);
}
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                                       struct rx_ring *rx_ring,
                                       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
        struct bq_desc *lbq_desc;
        struct bq_desc *sbq_desc;
        struct sk_buff *skb = NULL;
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

        /*
         * Handle the header buffer if present.
         */
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
            ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "Header of %d bytes in small buffer.\n", hdr_len);
                /*
                 * Headers fit nicely into a small buffer.
                 */
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                                 pci_unmap_addr(sbq_desc, mapaddr),
                                 pci_unmap_len(sbq_desc, maplen),
                                 PCI_DMA_FROMDEVICE);
                skb = sbq_desc->p.skb;
                ql_realign_skb(skb, hdr_len);
                skb_put(skb, hdr_len);
                sbq_desc->p.skb = NULL;
        }
  1115. /*
  1116. * Handle the data buffer(s).
  1117. */
  1118. if (unlikely(!length)) { /* Is there data too? */
  1119. QPRINTK(qdev, RX_STATUS, DEBUG,
  1120. "No Data buffer in this packet.\n");
  1121. return skb;
  1122. }
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n",
				length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr(sbq_desc,
								   mapaddr),
						    pci_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr(sbq_desc,
								      mapaddr),
						       pci_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n",
				length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer.
			 * We copy it to a new skb and let it go.  This can
			 * happen with jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		struct bq_element *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n",
				length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n",
				length);
			bq = (struct bq_element *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
				QPRINTK(qdev, RX_STATUS, ERR,
					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
					lbq_desc->bq->addr_lo, bq->addr_lo);
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
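
/*
 * A note on ql_build_rx_skb()'s contract: NULL means the frame could not
 * be assembled (no skb memory, or a large buffer address mismatch) and
 * the caller simply drops the completion.  Buffer queue entries consumed
 * along the way are not handed back, so a NULL return costs rx buffers.
 */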
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ?
			 "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_rx(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_rx(skb);
	}
}
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	/* Count the frame's bytes, not its mapping count. */
	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
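
/*
 * tx_count tracks free work queue slots: ql_init_tx_ring() seeds it with
 * the full ring length, qlge_send() decrements it per posted frame, and
 * the completion handler above increments it back.
 */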
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
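
/*
 * Both error helpers above stop traffic before scheduling the recovery
 * worker; the ASIC variant also masks interrupts, presumably because the
 * chip itself is suspect rather than just the MPI firmware.
 */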
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);
		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}
	return count;
}
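
/*
 * Wake hysteresis: the queue is only re-woken once tx_count climbs back
 * above a quarter of the ring.  For a hypothetical 256-entry work queue
 * that means more than 64 free slots, so the queue does not bounce on
 * every completion.
 */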
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);
		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
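
/*
 * The count returned above feeds NAPI: ql_napi_poll_msix() treats a
 * count below the budget as "ring drained" and re-enables the completion
 * interrupt, while a count equal to the budget keeps polling active.
 */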
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	if (work_done < budget) {
		__netif_rx_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
}

static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
}
/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}

/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}
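
/*
 * The hard-coded budget of 64 passed by ql_rx_clean() mirrors the NAPI
 * weight handed to netif_napi_add() in ql_start_rx_ring(), so the worker
 * path and the NAPI path drain a ring at the same rate.
 */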
/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
			      &rx_ring->rx_work, 0);
	return IRQ_HANDLED;
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	netif_rx_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int i;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if (var & STS_PI) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
				      &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qdev->rx_ring[0];
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
				      &rx_ring->rx_work, 0);
		work_done++;
	}

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QPRINTK(qdev, INTR, INFO,
					"Waking handler for rx_ring[%d].\n", i);
				ql_disable_completion_interrupt(qdev,
								intr_context->intr);
				if (i < qdev->rss_ring_first_cq_id)
					queue_delayed_work_on(rx_ring->cpu,
							      qdev->q_workqueue,
							      &rx_ring->rx_work,
							      0);
				else
					netif_rx_schedule(&rx_ring->napi);
				work_done++;
			}
		}
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
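
/*
 * ql_tso() return values as consumed by qlge_send(): 1 means a TSO IOCB
 * was built, 0 means the skb is not GSO (fall back to plain checksum
 * setup), and a negative value is the pskb_expand_head() error, which
 * makes the caller drop the frame.
 */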
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	u16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
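
/*
 * Note that ql_hw_csum_setup() is IPv4-only: it unconditionally sets
 * OB_MAC_TSO_IOCB_IP4 and reads the IPv4 header.  It seeds *check with
 * the pseudo-header sum so the hardware only has to fold in the payload.
 */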
static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		QPRINTK(qdev, TX_QUEUED, INFO,
			"%s: shutting down tx queue %d due to lack of resources.\n",
			__func__, tx_ring_idx);
		netif_stop_queue(ndev);
		atomic_inc(&tx_ring->queue_stopped);
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
		return NETDEV_TX_BUSY;
	}

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
			vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	ndev->trans_start = jiffies;
	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
		tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;

	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
	    || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		if (lbq_desc->p.lbq_page) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			put_page(lbq_desc->p.lbq_page);
			lbq_desc->p.lbq_page = NULL;
		}
		lbq_desc->bq->addr_lo = 0;
		lbq_desc->bq->addr_hi = 0;
	}
}
/*
 * Allocate and map a page for each element of the lbq.
 */
static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	u64 map;
	struct bq_element *bq = rx_ring->lbq_base;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->bq = bq;
		lbq_desc->index = i;
		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
		if (unlikely(!lbq_desc->p.lbq_page)) {
			QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
			goto mem_error;
		} else {
			map = pci_map_page(qdev->pdev,
					   lbq_desc->p.lbq_page,
					   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(qdev->pdev, map)) {
				QPRINTK(qdev, IFUP, ERR,
					"PCI mapping failed.\n");
				goto mem_error;
			}
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
			bq->addr_lo = cpu_to_le32(map);
			bq->addr_hi = cpu_to_le32(map >> 32);
		}
		bq++;
	}
	return 0;
mem_error:
	ql_free_lbq_buffers(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
		if (sbq_desc->bq == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
				i);
			return;
		}
		sbq_desc->bq->addr_lo = 0;
		sbq_desc->bq->addr_hi = 0;
	}
}
/* Allocate and map an skb for each element of the sbq. */
static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb;
	u64 map;
	struct bq_element *bq = rx_ring->sbq_base;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->bq = bq;
		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
		if (unlikely(!skb)) {
			/* Better luck next round */
			QPRINTK(qdev, IFUP, ERR,
				"small buff alloc failed for %d bytes at index %d.\n",
				rx_ring->sbq_buf_size, i);
			goto mem_err;
		}
		skb_reserve(skb, QLGE_SB_PAD);
		sbq_desc->p.skb = skb;
		/*
		 * Map only half the buffer. Because the
		 * other half may get some data copied to it
		 * when the completion arrives.
		 */
		map = pci_map_single(qdev->pdev,
				     skb->data,
				     rx_ring->sbq_buf_size / 2,
				     PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
			goto mem_err;
		}
		pci_unmap_addr_set(sbq_desc, mapaddr, map);
		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
		bq->addr_lo = cpu_to_le32(map);
		bq->addr_hi = cpu_to_le32(map >> 32);
		bq++;
	}
	return 0;
mem_err:
	ql_free_sbq_buffers(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	if (rx_ring->sbq_len)
		ql_free_sbq_buffers(qdev, rx_ring);
	if (rx_ring->lbq_len)
		ql_free_lbq_buffers(qdev, rx_ring);

	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer allocation failed.\n");
			goto err_mem;
		}
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate the buffers.
		 */
		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer allocation failed.\n");
			goto err_mem;
		}
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_ring_cb(struct ql_adapter *qdev)
{
	kfree(qdev->ring_mem);
}

static int ql_alloc_ring_cb(struct ql_adapter *qdev)
{
	/* Allocate space for tx/rx ring control blocks. */
	qdev->ring_mem_size =
	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
	    (qdev->rx_ring_count * sizeof(struct rx_ring));
	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
	if (qdev->ring_mem == NULL) {
		return -ENOMEM;
	} else {
		qdev->rx_ring = qdev->ring_mem;
		qdev->tx_ring = qdev->ring_mem +
		    (qdev->rx_ring_count * sizeof(struct rx_ring));
	}
	return 0;
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);

	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr_lo =
		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
		cqicb->lbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
		bq_len = (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
		rx_ring->lbq_free_cnt = 16;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr_lo =
		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
		rx_ring->sbq_free_cnt = 16;
	}
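
	/*
	 * In both blocks above the producer index starts 16 entries shy of
	 * the ring length with free_cnt at 16, i.e. the queues are handed
	 * to the chip nearly full and presumably replenished in 16-entry
	 * strides by ql_update_buffer_queues().
	 */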
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings. We do this so
		 * they can be run on multiple CPUs. There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active.  Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
	/*
	 * Advance the producer index for the buffer queues.
	 */
	wmb();
	if (rx_ring->lbq_len)
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	if (rx_ring->sbq_len)
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	return err;
}
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);

	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
	wqicb->cnsmr_idx_addr_hi =
	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
	return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		} else {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			irq_type = MSI_IRQ;
		}
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
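
/*
 * ql_enable_msix() is really a fallback ladder: try MSI-X with one vector
 * per rx_ring, fall back to single-vector MSI, and finally to legacy
 * INTx, downgrading the module-level irq_type as it goes.
 */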
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_enable_msix(qdev);

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 * This is only true when MSI-X is enabled.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;

			if (i == 0) {
				/*
				 * Default queue handles bcast/mcast plus
				 * async events.  Needs buffers.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-default-queue",
					qdev->ndev->name);
			} else if (i < qdev->rss_ring_first_cq_id) {
				/*
				 * Outbound queue is for outbound completions only.
				 */
				intr_context->handler = qlge_msix_tx_isr;
				sprintf(intr_context->name, "%s-txq-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rxq-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
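
/*
 * With MSI-X the vectors end up named, for a hypothetical device eth0:
 * "eth0-default-queue" (vector 0), "eth0-txq-N" for outbound completion
 * rings, and "eth0-rxq-N" for the RSS rings; otherwise everything shares
 * "eth0-single_irq".
 */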
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, ERR,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			} else {
				QPRINTK(qdev, IFUP, INFO,
					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
					i,
					qdev->rx_ring[i].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
					qdev->rx_ring[i].type ==
					TX_Q ? "TX_Q" : "",
					qdev->rx_ring[i].type ==
					RX_Q ? "RX_Q" : "", intr_context->name);
			}
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
	     RSS_RT6);
	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 32; i++)
		hash_id[i] = i & 1;

	/*
	 * Random values for the IPv6 and IPv4 Hash Keys.
	 */
	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);

	QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
	return status;
}
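
/*
 * The indirection table built above (hash_id[i] = i & 1) alternates
 * between only the first two RSS completion queues regardless of
 * rss_ring_count, even though the mask covers the full count; this looks
 * like a deliberately simple two-queue spread.
 */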
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	int i;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		return status;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		return status;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
		return status;
	}
	return status;
}
  2726. static int ql_adapter_initialize(struct ql_adapter *qdev)
  2727. {
  2728. u32 value, mask;
  2729. int i;
  2730. int status = 0;
  2731. /*
  2732. * Set up the System register to halt on errors.
  2733. */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		   min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
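	/*
	 * Header splitting: the split size written above is capped at
	 * whichever of SMALL_BUFFER_SIZE and MAX_SPLIT_SIZE is smaller,
	 * presumably so a split header always fits in a small buffer.
	 */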

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	status = ql_port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int max_wait_time;
	int status = 0;
	int resetCnt = 0;

#define MAX_RESET_CNT 1
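	/*
	 * Note: resetCnt is incremented before the retry check below, so
	 * with MAX_RESET_CNT set to 1 a stuck reset is reported but never
	 * actually reissued.
	 */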
issueReset:
	resetCnt++;
	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	/* Wait for reset to complete. */
	max_wait_time = 3;
	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
		max_wait_time);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		ssleep(1);
	} while ((--max_wait_time));

	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Stuck in soft reset: RST_FO:0x%08x\n", value);
		if (resetCnt < MAX_RESET_CNT)
			goto issueReset;
	}
	if (max_wait_time == 0) {
		status = -ETIMEDOUT;
		QPRINTK(qdev, IFDOWN, ERR,
			"Timed out waiting for chip reset to complete!\n");
	}

	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int i, status = 0;
	struct rx_ring *rx_ring;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);

	/* The default queue at index 0 is always processed in
	 * a workqueue.
	 */
	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);

	/* The rest of the rx_rings are processed in
	 * a workqueue only if it's a single interrupt
	 * environment (MSI/Legacy).
	 */
	for (i = 1; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Only the RSS rings use NAPI on multi irq
		 * environment. Outbound completion processing
		 * is done in interrupt context.
		 */
		if (i >= qdev->rss_ring_first_cq_id) {
			napi_disable(&rx_ring->napi);
		} else {
			cancel_delayed_work_sync(&rx_ring->rx_work);
		}
	}
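	/* With all receive processing quiesced, it should now be safe to
	 * mark the adapter down and reset the chip.
	 */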
	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	spin_lock(&qdev->hw_lock);
	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	spin_unlock(&qdev->hw_lock);
	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	spin_lock(&qdev->hw_lock);
	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		spin_unlock(&qdev->hw_lock);
		goto err_init;
	}
	spin_unlock(&qdev->hw_lock);
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	if ((ql_read32(qdev, STS) & qdev->port_init)) {
		netif_carrier_on(qdev->ndev);
		netif_start_queue(qdev->ndev);
	}

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static int ql_cycle_adapter(struct ql_adapter *qdev)
{
	int status;

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");
	rtnl_lock();
	dev_close(qdev->ndev);
	rtnl_unlock();
	return status;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	if (status)
		goto err_irq;
	return status;
err_irq:
	ql_free_mem_resources(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = num_online_cpus();

	/*
	 * For each processor present we allocate one
	 * rx_ring for outbound completions, and one
	 * rx_ring for inbound completions. Plus there is
	 * always the one default queue. For the CPU
	 * counts we end up with the following rx_rings:
	 * rx_ring count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 * To keep it simple we limit the total number of
	 * queues to < 32, so we truncate CPU to 8.
	 * This limitation can be removed when requested.
	 */
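	/*
	 * Worked example: with 4 online CPUs this yields 4 tx_rings, 4
	 * outbound completion rx_rings (cq_ids 1-4), 4 inbound RSS
	 * rx_rings (cq_ids 5-8) and the default queue at cq_id 0, for a
	 * total rx_ring_count of 4 + 4 + 1 = 9.
	 */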
	if (cpu_cnt > 8)
		cpu_cnt = 8;

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/* Allocate outbound completion ring for each CPU. */
	qdev->tx_ring_count = cpu_cnt;
	/* Allocate inbound completion (RSS) ring for each CPU. */
	qdev->rss_ring_count = cpu_cnt;
	/* cq_id for the first inbound ring handler. */
	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
	/*
	 * qdev->rx_ring_count:
	 * Total number of rx_rings. This includes the one
	 * default queue, a number of outbound completion
	 * handler rx_rings, and the number of inbound
	 * completion handler rx_rings.
	 */
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;

	if (ql_alloc_ring_cb(qdev))
		return -ENOMEM;
	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the default Q ID, which is zero.
		 */
		tx_ring->cq_id = i + 1;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i == 0) {	/* Default queue at index 0. */
			/*
			 * Default queue handles bcast/mcast plus
			 * async events. Needs buffers.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = DEFAULT_Q;
		} else if (i < qdev->rss_ring_first_cq_id) {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		} else {	/* Inbound completions (RSS) queues */
			/*
			 * Inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(struct bq_element);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(struct bq_element);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	ql_free_ring_cb(qdev);
	return err;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
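
	/* Only the standard (1500) and jumbo (9000) MTUs are accepted,
	 * presumably because the receive buffers are sized for exactly
	 * those two frame sizes.
	 */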
	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;
	ndev->mtu = new_mtu;
	return 0;
}

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}

static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i;

	spin_lock(&qdev->hw_lock);
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}
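
	/* Load each multicast address into the CAM, then enable the
	 * multicast-match routing slot so matching frames are accepted.
	 */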
	if (ndev->mc_count) {
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				goto exit;
			}
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	spin_unlock(&qdev->hw_lock);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int ret = 0;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				MAC_ADDR_TYPE_CAM_MAC, qdev->func)) { /* Unicast */
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
		ret = -1;
	}
	spin_unlock(&qdev->hw_lock);

	return ret;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}

static void ql_get_board_info(struct ql_adapter *qdev)
{
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
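	/*
	 * The PCI function number selects the per-port resources:
	 * function 1 appears to use the second XGMAC's semaphore, link
	 * status bits and MPI mailbox addresses, function 0 the first's.
	 */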
	if (qdev->func) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);

	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;
	u16 val16;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		goto err_out;
	} else {
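		/* Enable PCIe error reporting (correctable, non-fatal,
		 * fatal and unsupported-request) and clear no-snoop.
		 */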
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
		val16 |= (PCI_EXP_DEVCTL_CERE |
			  PCI_EXP_DEVCTL_NFERE |
			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	pci_set_drvdata(pdev, ndev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	ql_get_board_info(qdev);
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = ql_get_flash_params(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
		dev_err(&pdev->dev, "Invalid MAC address in FLASH.\n");
		err = -EIO;
		goto err_out;
	}

	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->q_workqueue = create_workqueue(ndev->name);
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_multicast_list = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_vlan_rx_register = ql_vlan_rx_register,
	.ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
};

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
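
/* Shutdown reuses the suspend path to quiesce the device before the
 * system powers off or reboots.
 */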
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);