qlge_main.c

  1. /*
  2. * QLogic qlge NIC HBA Driver
  3. * Copyright (c) 2003-2008 QLogic Corporation
  4. * See LICENSE.qlge for copyright and licensing details.
  5. * Author: Linux qlge network device driver by
  6. * Ron Mercer <ron.mercer@qlogic.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/init.h>
  10. #include <linux/types.h>
  11. #include <linux/module.h>
  12. #include <linux/list.h>
  13. #include <linux/pci.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/sched.h>
  17. #include <linux/slab.h>
  18. #include <linux/dmapool.h>
  19. #include <linux/mempool.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/kthread.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/errno.h>
  24. #include <linux/ioport.h>
  25. #include <linux/in.h>
  26. #include <linux/ip.h>
  27. #include <linux/ipv6.h>
  28. #include <net/ipv6.h>
  29. #include <linux/tcp.h>
  30. #include <linux/udp.h>
  31. #include <linux/if_arp.h>
  32. #include <linux/if_ether.h>
  33. #include <linux/netdevice.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/ethtool.h>
  36. #include <linux/skbuff.h>
  37. #include <linux/rtnetlink.h>
  38. #include <linux/if_vlan.h>
  39. #include <linux/delay.h>
  40. #include <linux/mm.h>
  41. #include <linux/vmalloc.h>
  42. #include <net/ip6_checksum.h>
  43. #include "qlge.h"
  44. char qlge_driver_name[] = DRV_NAME;
  45. const char qlge_driver_version[] = DRV_VERSION;
  46. MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  47. MODULE_DESCRIPTION(DRV_STRING " ");
  48. MODULE_LICENSE("GPL");
  49. MODULE_VERSION(DRV_VERSION);
  50. static const u32 default_msg =
  51. NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  52. /* NETIF_MSG_TIMER | */
  53. NETIF_MSG_IFDOWN |
  54. NETIF_MSG_IFUP |
  55. NETIF_MSG_RX_ERR |
  56. NETIF_MSG_TX_ERR |
  57. NETIF_MSG_TX_QUEUED |
  58. NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
  59. /* NETIF_MSG_PKTDATA | */
  60. NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  61. static int debug = 0x00007fff; /* defaults above */
  62. module_param(debug, int, 0);
  63. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  64. #define MSIX_IRQ 0
  65. #define MSI_IRQ 1
  66. #define LEG_IRQ 2
  67. static int irq_type = MSIX_IRQ;
  68. module_param(irq_type, int, MSIX_IRQ);
  69. MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
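/*
 * Usage note (not in the original source): both knobs above are plain
 * module parameters, so a hypothetical load line such as
 *
 *   modprobe qlge debug=0x4007 irq_type=2
 *
 * would limit logging to DRV/PROBE/LINK/HW messages and force legacy
 * interrupts.  The debug value is a NETIF_MSG_* bitmask, as listed in
 * default_msg above.
 */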
  70. static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
  71. {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
  72. /* required last entry */
  73. {0,}
  74. };
  75. MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  76. /* This hardware semaphore provides exclusive access to
  77. * resources shared between the NIC driver, MPI firmware,
  78. * FCOE firmware and the FC driver.
  79. */
  80. static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
  81. {
  82. u32 sem_bits = 0;
  83. switch (sem_mask) {
  84. case SEM_XGMAC0_MASK:
  85. sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
  86. break;
  87. case SEM_XGMAC1_MASK:
  88. sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
  89. break;
  90. case SEM_ICB_MASK:
  91. sem_bits = SEM_SET << SEM_ICB_SHIFT;
  92. break;
  93. case SEM_MAC_ADDR_MASK:
  94. sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
  95. break;
  96. case SEM_FLASH_MASK:
  97. sem_bits = SEM_SET << SEM_FLASH_SHIFT;
  98. break;
  99. case SEM_PROBE_MASK:
  100. sem_bits = SEM_SET << SEM_PROBE_SHIFT;
  101. break;
  102. case SEM_RT_IDX_MASK:
  103. sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
  104. break;
  105. case SEM_PROC_REG_MASK:
  106. sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
  107. break;
  108. default:
  109. QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
  110. return -EINVAL;
  111. }
  112. ql_write32(qdev, SEM, sem_bits | sem_mask);
  113. return !(ql_read32(qdev, SEM) & sem_bits);
  114. }
  115. int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
  116. {
  117. unsigned int wait_count = 30;
  118. do {
  119. if (!ql_sem_trylock(qdev, sem_mask))
  120. return 0;
  121. udelay(100);
  122. } while (--wait_count);
  123. return -ETIMEDOUT;
  124. }
  125. void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
  126. {
  127. ql_write32(qdev, SEM, sem_mask);
  128. ql_read32(qdev, SEM); /* flush */
  129. }
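/*
 * Illustrative sketch (not part of the original driver): the typical
 * acquire/use/release pattern for the hardware semaphore helpers above.
 * The helper name and the FLASH-protected work are hypothetical; the
 * real callers (e.g. ql_get_flash_params() below) follow the same shape.
 */
static int ql_sem_usage_sketch(struct ql_adapter *qdev)
{
	int status;

	/* Spin (up to ~3ms) until the hardware grants the semaphore. */
	status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
	if (status)
		return status;	/* -ETIMEDOUT: another function holds it. */

	/* ... access the shared resource here ... */

	/* Always release, otherwise the MPI/FCoE firmware is locked out. */
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return 0;
}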
  130. /* This function waits for a specific bit to come ready
  131. * in a given register. It is used mostly by the initialization
  132. * process, but is also used by kernel thread APIs such as
  133. * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
  134. */
  135. int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
  136. {
  137. u32 temp;
  138. int count = UDELAY_COUNT;
  139. while (count) {
  140. temp = ql_read32(qdev, reg);
  141. /* check for errors */
  142. if (temp & err_bit) {
  143. QPRINTK(qdev, PROBE, ALERT,
  144. "register 0x%.08x access error, value = 0x%.08x!.\n",
  145. reg, temp);
  146. return -EIO;
  147. } else if (temp & bit)
  148. return 0;
  149. udelay(UDELAY_DELAY);
  150. count--;
  151. }
  152. QPRINTK(qdev, PROBE, ALERT,
  153. "Timed out waiting for reg %x to come ready.\n", reg);
  154. return -ETIMEDOUT;
  155. }
  156. /* The CFG register is used to download TX and RX control blocks
  157. * to the chip. This function waits for an operation to complete.
  158. */
  159. static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
  160. {
  161. int count = UDELAY_COUNT;
  162. u32 temp;
  163. while (count) {
  164. temp = ql_read32(qdev, CFG);
  165. if (temp & CFG_LE)
  166. return -EIO;
  167. if (!(temp & bit))
  168. return 0;
  169. udelay(UDELAY_DELAY);
  170. count--;
  171. }
  172. return -ETIMEDOUT;
  173. }
  174. /* Used to issue init control blocks to hw. Maps control block,
  175. * sets address, triggers download, waits for completion.
  176. */
  177. int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
  178. u16 q_id)
  179. {
  180. u64 map;
  181. int status = 0;
  182. int direction;
  183. u32 mask;
  184. u32 value;
  185. direction =
  186. (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
  187. PCI_DMA_FROMDEVICE;
  188. map = pci_map_single(qdev->pdev, ptr, size, direction);
  189. if (pci_dma_mapping_error(qdev->pdev, map)) {
  190. QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
  191. return -ENOMEM;
  192. }
  193. status = ql_wait_cfg(qdev, bit);
  194. if (status) {
  195. QPRINTK(qdev, IFUP, ERR,
  196. "Timed out waiting for CFG to come ready.\n");
  197. goto exit;
  198. }
  199. status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
  200. if (status)
  201. goto exit;
  202. ql_write32(qdev, ICB_L, (u32) map);
  203. ql_write32(qdev, ICB_H, (u32) (map >> 32));
  204. ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
  205. mask = CFG_Q_MASK | (bit << 16);
  206. value = bit | (q_id << CFG_Q_SHIFT);
  207. ql_write32(qdev, CFG, (mask | value));
  208. /*
  209. * Wait for the bit to clear after signaling hw.
  210. */
  211. status = ql_wait_cfg(qdev, bit);
  212. exit:
  213. pci_unmap_single(qdev->pdev, map, size, direction);
  214. return status;
  215. }
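/*
 * Illustrative sketch (not at this point in the original file): how a
 * caller would push a completion-queue init control block to the chip
 * with ql_write_cfg().  The cqicb and cq_id are assumed to come from an
 * rx_ring being initialized; CFG_LCQ selects the "load completion queue"
 * operation, so the block is DMA-mapped to-device and q_id tells the
 * chip which queue the block describes.
 */
static int ql_load_cq_icb_sketch(struct ql_adapter *qdev,
				 struct cqicb *cqicb, u16 cq_id)
{
	return ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, cq_id);
}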
  216. /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
  217. int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
  218. u32 *value)
  219. {
  220. u32 offset = 0;
  221. int status;
  222. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  223. if (status)
  224. return status;
  225. switch (type) {
  226. case MAC_ADDR_TYPE_MULTI_MAC:
  227. case MAC_ADDR_TYPE_CAM_MAC:
  228. {
  229. status =
  230. ql_wait_reg_rdy(qdev,
  231. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  232. if (status)
  233. goto exit;
  234. ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
  235. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  236. MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
  237. status =
  238. ql_wait_reg_rdy(qdev,
  239. MAC_ADDR_IDX, MAC_ADDR_MR, 0);
  240. if (status)
  241. goto exit;
  242. *value++ = ql_read32(qdev, MAC_ADDR_DATA);
  243. status =
  244. ql_wait_reg_rdy(qdev,
  245. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  246. if (status)
  247. goto exit;
  248. ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
  249. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  250. MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
  251. status =
  252. ql_wait_reg_rdy(qdev,
  253. MAC_ADDR_IDX, MAC_ADDR_MR, 0);
  254. if (status)
  255. goto exit;
  256. *value++ = ql_read32(qdev, MAC_ADDR_DATA);
  257. if (type == MAC_ADDR_TYPE_CAM_MAC) {
  258. status =
  259. ql_wait_reg_rdy(qdev,
  260. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  261. if (status)
  262. goto exit;
  263. ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
  264. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  265. MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
  266. status =
  267. ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
  268. MAC_ADDR_MR, 0);
  269. if (status)
  270. goto exit;
  271. *value++ = ql_read32(qdev, MAC_ADDR_DATA);
  272. }
  273. break;
  274. }
  275. case MAC_ADDR_TYPE_VLAN:
  276. case MAC_ADDR_TYPE_MULTI_FLTR:
  277. default:
  278. QPRINTK(qdev, IFUP, CRIT,
  279. "Address type %d not yet supported.\n", type);
  280. status = -EPERM;
  281. }
  282. exit:
  283. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  284. return status;
  285. }
  286. /* Set up a MAC, multicast or VLAN address for the
  287. * inbound frame matching.
  288. */
  289. static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
  290. u16 index)
  291. {
  292. u32 offset = 0;
  293. int status = 0;
  294. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  295. if (status)
  296. return status;
  297. switch (type) {
  298. case MAC_ADDR_TYPE_MULTI_MAC:
  299. case MAC_ADDR_TYPE_CAM_MAC:
  300. {
  301. u32 cam_output;
  302. u32 upper = (addr[0] << 8) | addr[1];
  303. u32 lower =
  304. (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
  305. (addr[5]);
  306. QPRINTK(qdev, IFUP, INFO,
  307. "Adding %s address %pM"
  308. " at index %d in the CAM.\n",
  309. ((type ==
  310. MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
  311. "UNICAST"), addr, index);
  312. status =
  313. ql_wait_reg_rdy(qdev,
  314. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  315. if (status)
  316. goto exit;
  317. ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
  318. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  319. type); /* type */
  320. ql_write32(qdev, MAC_ADDR_DATA, lower);
  321. status =
  322. ql_wait_reg_rdy(qdev,
  323. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  324. if (status)
  325. goto exit;
  326. ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
  327. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  328. type); /* type */
  329. ql_write32(qdev, MAC_ADDR_DATA, upper);
  330. status =
  331. ql_wait_reg_rdy(qdev,
  332. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  333. if (status)
  334. goto exit;
  335. ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
  336. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  337. type); /* type */
  338. /* This field should also include the queue id
  339. and possibly the function id. Right now we hardcode
  340. the route field to NIC core.
  341. */
  342. if (type == MAC_ADDR_TYPE_CAM_MAC) {
  343. cam_output = (CAM_OUT_ROUTE_NIC |
  344. (qdev->
  345. func << CAM_OUT_FUNC_SHIFT) |
  346. (qdev->
  347. rss_ring_first_cq_id <<
  348. CAM_OUT_CQ_ID_SHIFT));
  349. if (qdev->vlgrp)
  350. cam_output |= CAM_OUT_RV;
  351. /* route to NIC core */
  352. ql_write32(qdev, MAC_ADDR_DATA, cam_output);
  353. }
  354. break;
  355. }
  356. case MAC_ADDR_TYPE_VLAN:
  357. {
  358. u32 enable_bit = *((u32 *) &addr[0]);
  359. /* For VLAN, the addr actually holds a bit that
  360. * either enables or disables the vlan id we are
  361. * addressing. It's either MAC_ADDR_E on or off.
  362. * That's bit-27 we're talking about.
  363. */
  364. QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
  365. (enable_bit ? "Adding" : "Removing"),
  366. index, (enable_bit ? "to" : "from"));
  367. status =
  368. ql_wait_reg_rdy(qdev,
  369. MAC_ADDR_IDX, MAC_ADDR_MW, 0);
  370. if (status)
  371. goto exit;
  372. ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
  373. (index << MAC_ADDR_IDX_SHIFT) | /* index */
  374. type | /* type */
  375. enable_bit); /* enable/disable */
  376. break;
  377. }
  378. case MAC_ADDR_TYPE_MULTI_FLTR:
  379. default:
  380. QPRINTK(qdev, IFUP, CRIT,
  381. "Address type %d not yet supported.\n", type);
  382. status = -EPERM;
  383. }
  384. exit:
  385. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  386. return status;
  387. }
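/*
 * Illustrative sketch: programming the port's station address into the
 * CAM so inbound unicast frames match.  Using qdev->func as the CAM
 * index is an assumption based on how the CAM output word is built
 * above; the driver's real call site appears later in the file.
 */
static int ql_cam_set_station_addr_sketch(struct ql_adapter *qdev)
{
	return ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->dev_addr,
				   MAC_ADDR_TYPE_CAM_MAC, qdev->func);
}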
  388. /* Get a specific frame routing value from the CAM.
  389. * Used for debug and reg dump.
  390. */
  391. int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
  392. {
  393. int status = 0;
  394. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  395. if (status)
  396. goto exit;
  397. status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
  398. if (status)
  399. goto exit;
  400. ql_write32(qdev, RT_IDX,
  401. RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
  402. status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
  403. if (status)
  404. goto exit;
  405. *value = ql_read32(qdev, RT_DATA);
  406. exit:
  407. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  408. return status;
  409. }
  410. /* The NIC function for this chip has 16 routing indexes. Each one can be used
  411. * to route different frame types to various inbound queues. We send broadcast/
  412. * multicast/error frames to the default queue for slow handling,
  413. * and CAM hit/RSS frames to the fast handling queues.
  414. */
  415. static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
  416. int enable)
  417. {
  418. int status;
  419. u32 value = 0;
  420. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  421. if (status)
  422. return status;
  423. QPRINTK(qdev, IFUP, DEBUG,
  424. "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
  425. (enable ? "Adding" : "Removing"),
  426. ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
  427. ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
  428. ((index ==
  429. RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
  430. ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
  431. ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
  432. ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
  433. ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
  434. ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
  435. ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
  436. ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
  437. ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
  438. ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
  439. ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
  440. ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
  441. ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
  442. ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
  443. (enable ? "to" : "from"));
  444. switch (mask) {
  445. case RT_IDX_CAM_HIT:
  446. {
  447. value = RT_IDX_DST_CAM_Q | /* dest */
  448. RT_IDX_TYPE_NICQ | /* type */
  449. (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
  450. break;
  451. }
  452. case RT_IDX_VALID: /* Promiscuous Mode frames. */
  453. {
  454. value = RT_IDX_DST_DFLT_Q | /* dest */
  455. RT_IDX_TYPE_NICQ | /* type */
  456. (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
  457. break;
  458. }
  459. case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
  460. {
  461. value = RT_IDX_DST_DFLT_Q | /* dest */
  462. RT_IDX_TYPE_NICQ | /* type */
  463. (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
  464. break;
  465. }
  466. case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
  467. {
  468. value = RT_IDX_DST_DFLT_Q | /* dest */
  469. RT_IDX_TYPE_NICQ | /* type */
  470. (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
  471. break;
  472. }
  473. case RT_IDX_MCAST: /* Pass up All Multicast frames. */
  474. {
  475. value = RT_IDX_DST_CAM_Q | /* dest */
  476. RT_IDX_TYPE_NICQ | /* type */
  477. (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
  478. break;
  479. }
  480. case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
  481. {
  482. value = RT_IDX_DST_CAM_Q | /* dest */
  483. RT_IDX_TYPE_NICQ | /* type */
  484. (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
  485. break;
  486. }
  487. case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
  488. {
  489. value = RT_IDX_DST_RSS | /* dest */
  490. RT_IDX_TYPE_NICQ | /* type */
  491. (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
  492. break;
  493. }
  494. case 0: /* Clear the E-bit on an entry. */
  495. {
  496. value = RT_IDX_DST_DFLT_Q | /* dest */
  497. RT_IDX_TYPE_NICQ | /* type */
  498. (index << RT_IDX_IDX_SHIFT);/* index */
  499. break;
  500. }
  501. default:
  502. QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
  503. mask);
  504. status = -EPERM;
  505. goto exit;
  506. }
  507. if (value) {
  508. status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
  509. if (status)
  510. goto exit;
  511. value |= (enable ? RT_IDX_E : 0);
  512. ql_write32(qdev, RT_IDX, value);
  513. ql_write32(qdev, RT_DATA, enable ? mask : 0);
  514. }
  515. exit:
  516. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  517. return status;
  518. }
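/*
 * Illustrative sketch: how the routing table described above might be
 * populated at init time.  Error and broadcast frames go to the default
 * (slow) queue, while CAM hits go to the fast-handling queues.  The
 * helper name is hypothetical; the driver's real routing setup lives
 * later in the file.
 */
static int ql_routing_sketch(struct ql_adapter *qdev)
{
	int status;

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status)
		return status;
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status)
		return status;
	/* Unicast frames that hit the CAM go to the fast queues. */
	return ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
}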
  519. static void ql_enable_interrupts(struct ql_adapter *qdev)
  520. {
  521. ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
  522. }
  523. static void ql_disable_interrupts(struct ql_adapter *qdev)
  524. {
  525. ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
  526. }
  527. /* If we're running with multiple MSI-X vectors then we enable on the fly.
  528. * Otherwise, we may have multiple outstanding workers and don't want to
  529. * enable until the last one finishes. In this case, the irq_cnt gets
  530. * incremented every time we queue a worker and decremented every time
  531. * a worker finishes. Once it hits zero we enable the interrupt.
  532. */
  533. u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
  534. {
  535. u32 var = 0;
  536. unsigned long hw_flags = 0;
  537. struct intr_context *ctx = qdev->intr_context + intr;
  538. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
  539. /* Always enable if we're MSIX multi interrupts and
  540. * it's not the default (zeroeth) interrupt.
  541. */
  542. ql_write32(qdev, INTR_EN,
  543. ctx->intr_en_mask);
  544. var = ql_read32(qdev, STS);
  545. return var;
  546. }
  547. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  548. if (atomic_dec_and_test(&ctx->irq_cnt)) {
  549. ql_write32(qdev, INTR_EN,
  550. ctx->intr_en_mask);
  551. var = ql_read32(qdev, STS);
  552. }
  553. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  554. return var;
  555. }
  556. static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
  557. {
  558. u32 var = 0;
  559. unsigned long hw_flags;
  560. struct intr_context *ctx;
  561. /* HW disables for us if we're MSIX multi interrupts and
  562. * it's not the default (zeroeth) interrupt.
  563. */
  564. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
  565. return 0;
  566. ctx = qdev->intr_context + intr;
  567. spin_lock_irqsave(&qdev->hw_lock, hw_flags);
  568. if (!atomic_read(&ctx->irq_cnt)) {
  569. ql_write32(qdev, INTR_EN,
  570. ctx->intr_dis_mask);
  571. var = ql_read32(qdev, STS);
  572. }
  573. atomic_inc(&ctx->irq_cnt);
  574. spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
  575. return var;
  576. }
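/*
 * Illustrative sketch of the pairing described in the comment above:
 * in the non-MSI-X (shared interrupt) case, each queued worker bumps
 * irq_cnt via ql_disable_completion_interrupt(), and the interrupt is
 * only re-armed when the matching ql_enable_completion_interrupt()
 * drops the count back to zero.  The worker body here is hypothetical.
 */
static void ql_deferred_work_sketch(struct ql_adapter *qdev, u32 intr)
{
	ql_disable_completion_interrupt(qdev, intr);	/* irq_cnt++ */

	/* ... process completions outside interrupt context ... */

	ql_enable_completion_interrupt(qdev, intr);	/* re-arm at zero */
}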
  577. static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
  578. {
  579. int i;
  580. for (i = 0; i < qdev->intr_count; i++) {
  581. /* The enable call does an atomic_dec_and_test
  582. * and enables only if the result is zero.
  583. * So we precharge it here.
  584. */
  585. if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
  586. i == 0))
  587. atomic_set(&qdev->intr_context[i].irq_cnt, 1);
  588. ql_enable_completion_interrupt(qdev, i);
  589. }
  590. }
  591. static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
  592. {
  593. int status = 0;
  594. /* wait for reg to come ready */
  595. status = ql_wait_reg_rdy(qdev,
  596. FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
  597. if (status)
  598. goto exit;
  599. /* set up for reg read */
  600. ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
  601. /* wait for reg to come ready */
  602. status = ql_wait_reg_rdy(qdev,
  603. FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
  604. if (status)
  605. goto exit;
  606. /* get the data */
  607. *data = ql_read32(qdev, FLASH_DATA);
  608. exit:
  609. return status;
  610. }
  611. static int ql_get_flash_params(struct ql_adapter *qdev)
  612. {
  613. int i;
  614. int status;
  615. u32 *p = (u32 *)&qdev->flash;
  616. if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
  617. return -ETIMEDOUT;
  618. for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
  619. status = ql_read_flash_word(qdev, i, p);
  620. if (status) {
  621. QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
  622. goto exit;
  623. }
  624. }
  625. exit:
  626. ql_sem_unlock(qdev, SEM_FLASH_MASK);
  627. return status;
  628. }
  629. /* xgmac registers are located behind the xgmac_addr and xgmac_data
  630. * register pair. Each read/write requires us to wait for the ready
  631. * bit before reading/writing the data.
  632. */
  633. static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
  634. {
  635. int status;
  636. /* wait for reg to come ready */
  637. status = ql_wait_reg_rdy(qdev,
  638. XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
  639. if (status)
  640. return status;
  641. /* write the data to the data reg */
  642. ql_write32(qdev, XGMAC_DATA, data);
  643. /* trigger the write */
  644. ql_write32(qdev, XGMAC_ADDR, reg);
  645. return status;
  646. }
  647. /* xgmac registers are located behind the xgmac_addr and xgmac_data
  648. * register pair. Each read/write requires us to wait for the ready
  649. * bit before reading/writing the data.
  650. */
  651. int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
  652. {
  653. int status = 0;
  654. /* wait for reg to come ready */
  655. status = ql_wait_reg_rdy(qdev,
  656. XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
  657. if (status)
  658. goto exit;
  659. /* set up for reg read */
  660. ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
  661. /* wait for reg to come ready */
  662. status = ql_wait_reg_rdy(qdev,
  663. XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
  664. if (status)
  665. goto exit;
  666. /* get the data */
  667. *data = ql_read32(qdev, XGMAC_DATA);
  668. exit:
  669. return status;
  670. }
  671. /* This is used for reading the 64-bit statistics regs. */
  672. int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
  673. {
  674. int status = 0;
  675. u32 hi = 0;
  676. u32 lo = 0;
  677. status = ql_read_xgmac_reg(qdev, reg, &lo);
  678. if (status)
  679. goto exit;
  680. status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
  681. if (status)
  682. goto exit;
  683. *data = (u64) lo | ((u64) hi << 32);
  684. exit:
  685. return status;
  686. }
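/*
 * Illustrative sketch: reading one 64-bit XGMAC statistics counter via
 * the address/data register pair.  The offset 0x200 is a placeholder;
 * the real statistics offsets live in qlge.h.  Taking xg_sem_mask is an
 * assumption that XGMAC access is shared with the other port function,
 * as in ql_port_initialize() below.
 */
static int ql_read_one_stat_sketch(struct ql_adapter *qdev, u64 *val)
{
	int status;

	status = ql_sem_spinlock(qdev, qdev->xg_sem_mask);
	if (status)
		return status;
	status = ql_read_xgmac_reg64(qdev, 0x200, val);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}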
  687. /* Take the MAC Core out of reset.
  688. * Enable statistics counting.
  689. * Take the transmitter/receiver out of reset.
  690. * This functionality may be done in the MPI firmware at a
  691. * later date.
  692. */
  693. static int ql_port_initialize(struct ql_adapter *qdev)
  694. {
  695. int status = 0;
  696. u32 data;
  697. if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
  698. /* Another function has the semaphore, so
  699. * wait for the port init bit to come ready.
  700. */
  701. QPRINTK(qdev, LINK, INFO,
  702. "Another function has the semaphore, so wait for the port init bit to come ready.\n");
  703. status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
  704. if (status) {
  705. QPRINTK(qdev, LINK, CRIT,
  706. "Port initialize timed out.\n");
  707. }
  708. return status;
  709. }
  710. QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
  711. /* Set the core reset. */
  712. status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
  713. if (status)
  714. goto end;
  715. data |= GLOBAL_CFG_RESET;
  716. status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
  717. if (status)
  718. goto end;
  719. /* Clear the core reset and turn on jumbo for receiver. */
  720. data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
  721. data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
  722. data |= GLOBAL_CFG_TX_STAT_EN;
  723. data |= GLOBAL_CFG_RX_STAT_EN;
  724. status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
  725. if (status)
  726. goto end;
  727. /* Enable the transmitter and clear its reset. */
  728. status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
  729. if (status)
  730. goto end;
  731. data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
  732. data |= TX_CFG_EN; /* Enable the transmitter. */
  733. status = ql_write_xgmac_reg(qdev, TX_CFG, data);
  734. if (status)
  735. goto end;
  736. /* Enable the receiver and clear its reset. */
  737. status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
  738. if (status)
  739. goto end;
  740. data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
  741. data |= RX_CFG_EN; /* Enable the receiver. */
  742. status = ql_write_xgmac_reg(qdev, RX_CFG, data);
  743. if (status)
  744. goto end;
  745. /* Turn on jumbo. */
  746. status =
  747. ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
  748. if (status)
  749. goto end;
  750. status =
  751. ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
  752. if (status)
  753. goto end;
  754. /* Signal to the world that the port is enabled. */
  755. ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
  756. end:
  757. ql_sem_unlock(qdev, qdev->xg_sem_mask);
  758. return status;
  759. }
  760. /* Get the next large buffer. */
  761. static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
  762. {
  763. struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
  764. rx_ring->lbq_curr_idx++;
  765. if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
  766. rx_ring->lbq_curr_idx = 0;
  767. rx_ring->lbq_free_cnt++;
  768. return lbq_desc;
  769. }
  770. /* Get the next small buffer. */
  771. static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
  772. {
  773. struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
  774. rx_ring->sbq_curr_idx++;
  775. if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
  776. rx_ring->sbq_curr_idx = 0;
  777. rx_ring->sbq_free_cnt++;
  778. return sbq_desc;
  779. }
  780. /* Update an rx ring index. */
  781. static void ql_update_cq(struct rx_ring *rx_ring)
  782. {
  783. rx_ring->cnsmr_idx++;
  784. rx_ring->curr_entry++;
  785. if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
  786. rx_ring->cnsmr_idx = 0;
  787. rx_ring->curr_entry = rx_ring->cq_base;
  788. }
  789. }
  790. static void ql_write_cq_idx(struct rx_ring *rx_ring)
  791. {
  792. ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
  793. }
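/*
 * Illustrative sketch of how a service routine consumes completion
 * entries using the two helpers above: walk entries until the consumer
 * index catches up with the chip's producer index, then write the new
 * consumer index back through the doorbell.  'prod_idx' is a stand-in
 * for however the producer index is obtained (the chip posts it to a
 * shadow location); the per-entry handling is elided.
 */
static void ql_cq_service_sketch(struct rx_ring *rx_ring, u32 prod_idx)
{
	while (prod_idx != rx_ring->cnsmr_idx) {
		/* ... handle *rx_ring->curr_entry here ... */
		ql_update_cq(rx_ring);	/* advance, wrapping at cq_len */
	}
	ql_write_cq_idx(rx_ring);	/* tell the chip how far we got */
}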
  794. /* Process (refill) a large buffer queue. */
  795. static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  796. {
  797. int clean_idx = rx_ring->lbq_clean_idx;
  798. struct bq_desc *lbq_desc;
  799. u64 map;
  800. int i;
  801. while (rx_ring->lbq_free_cnt > 16) {
  802. for (i = 0; i < 16; i++) {
  803. QPRINTK(qdev, RX_STATUS, DEBUG,
  804. "lbq: try cleaning clean_idx = %d.\n",
  805. clean_idx);
  806. lbq_desc = &rx_ring->lbq[clean_idx];
  807. if (lbq_desc->p.lbq_page == NULL) {
  808. QPRINTK(qdev, RX_STATUS, DEBUG,
  809. "lbq: getting new page for index %d.\n",
  810. lbq_desc->index);
  811. lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
  812. if (lbq_desc->p.lbq_page == NULL) {
  813. QPRINTK(qdev, RX_STATUS, ERR,
  814. "Couldn't get a page.\n");
  815. return;
  816. }
  817. map = pci_map_page(qdev->pdev,
  818. lbq_desc->p.lbq_page,
  819. 0, PAGE_SIZE,
  820. PCI_DMA_FROMDEVICE);
  821. if (pci_dma_mapping_error(qdev->pdev, map)) {
  822. QPRINTK(qdev, RX_STATUS, ERR,
  823. "PCI mapping failed.\n");
  824. return;
  825. }
  826. pci_unmap_addr_set(lbq_desc, mapaddr, map);
  827. pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
  828. *lbq_desc->addr = cpu_to_le64(map);
  829. }
  830. clean_idx++;
  831. if (clean_idx == rx_ring->lbq_len)
  832. clean_idx = 0;
  833. }
  834. rx_ring->lbq_clean_idx = clean_idx;
  835. rx_ring->lbq_prod_idx += 16;
  836. if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
  837. rx_ring->lbq_prod_idx = 0;
  838. QPRINTK(qdev, RX_STATUS, DEBUG,
  839. "lbq: updating prod idx = %d.\n",
  840. rx_ring->lbq_prod_idx);
  841. ql_write_db_reg(rx_ring->lbq_prod_idx,
  842. rx_ring->lbq_prod_idx_db_reg);
  843. rx_ring->lbq_free_cnt -= 16;
  844. }
  845. }
  846. /* Process (refill) a small buffer queue. */
  847. static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  848. {
  849. int clean_idx = rx_ring->sbq_clean_idx;
  850. struct bq_desc *sbq_desc;
  851. u64 map;
  852. int i;
  853. while (rx_ring->sbq_free_cnt > 16) {
  854. for (i = 0; i < 16; i++) {
  855. sbq_desc = &rx_ring->sbq[clean_idx];
  856. QPRINTK(qdev, RX_STATUS, DEBUG,
  857. "sbq: try cleaning clean_idx = %d.\n",
  858. clean_idx);
  859. if (sbq_desc->p.skb == NULL) {
  860. QPRINTK(qdev, RX_STATUS, DEBUG,
  861. "sbq: getting new skb for index %d.\n",
  862. sbq_desc->index);
  863. sbq_desc->p.skb =
  864. netdev_alloc_skb(qdev->ndev,
  865. rx_ring->sbq_buf_size);
  866. if (sbq_desc->p.skb == NULL) {
  867. QPRINTK(qdev, PROBE, ERR,
  868. "Couldn't get an skb.\n");
  869. rx_ring->sbq_clean_idx = clean_idx;
  870. return;
  871. }
  872. skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
  873. map = pci_map_single(qdev->pdev,
  874. sbq_desc->p.skb->data,
  875. rx_ring->sbq_buf_size /
  876. 2, PCI_DMA_FROMDEVICE);
  877. if (pci_dma_mapping_error(qdev->pdev, map)) {
  878. QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
  879. rx_ring->sbq_clean_idx = clean_idx;
  880. return;
  881. }
  882. pci_unmap_addr_set(sbq_desc, mapaddr, map);
  883. pci_unmap_len_set(sbq_desc, maplen,
  884. rx_ring->sbq_buf_size / 2);
  885. *sbq_desc->addr = cpu_to_le64(map);
  886. }
  887. clean_idx++;
  888. if (clean_idx == rx_ring->sbq_len)
  889. clean_idx = 0;
  890. }
  891. rx_ring->sbq_clean_idx = clean_idx;
  892. rx_ring->sbq_prod_idx += 16;
  893. if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
  894. rx_ring->sbq_prod_idx = 0;
  895. QPRINTK(qdev, RX_STATUS, DEBUG,
  896. "sbq: updating prod idx = %d.\n",
  897. rx_ring->sbq_prod_idx);
  898. ql_write_db_reg(rx_ring->sbq_prod_idx,
  899. rx_ring->sbq_prod_idx_db_reg);
  900. rx_ring->sbq_free_cnt -= 16;
  901. }
  902. }
  903. static void ql_update_buffer_queues(struct ql_adapter *qdev,
  904. struct rx_ring *rx_ring)
  905. {
  906. ql_update_sbq(qdev, rx_ring);
  907. ql_update_lbq(qdev, rx_ring);
  908. }
  909. /* Unmaps tx buffers. Can be called from send() if a pci mapping
  910. * fails at some stage, or from the interrupt when a tx completes.
  911. */
  912. static void ql_unmap_send(struct ql_adapter *qdev,
  913. struct tx_ring_desc *tx_ring_desc, int mapped)
  914. {
  915. int i;
  916. for (i = 0; i < mapped; i++) {
  917. if (i == 0 || (i == 7 && mapped > 7)) {
  918. /*
  919. * Unmap the skb->data area, or the
  920. * external sglist (AKA the Outbound
  921. * Address List (OAL)).
  922. * If it's the zeroth element, then it's
  923. * the skb->data area. If it's the 7th
  924. * element and there are more than 6 frags,
  925. * then it's an OAL.
  926. */
  927. if (i == 7) {
  928. QPRINTK(qdev, TX_DONE, DEBUG,
  929. "unmapping OAL area.\n");
  930. }
  931. pci_unmap_single(qdev->pdev,
  932. pci_unmap_addr(&tx_ring_desc->map[i],
  933. mapaddr),
  934. pci_unmap_len(&tx_ring_desc->map[i],
  935. maplen),
  936. PCI_DMA_TODEVICE);
  937. } else {
  938. QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
  939. i);
  940. pci_unmap_page(qdev->pdev,
  941. pci_unmap_addr(&tx_ring_desc->map[i],
  942. mapaddr),
  943. pci_unmap_len(&tx_ring_desc->map[i],
  944. maplen), PCI_DMA_TODEVICE);
  945. }
  946. }
  947. }
  948. /* Map the buffers for this transmit. This will return
  949. * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  950. */
  951. static int ql_map_send(struct ql_adapter *qdev,
  952. struct ob_mac_iocb_req *mac_iocb_ptr,
  953. struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
  954. {
  955. int len = skb_headlen(skb);
  956. dma_addr_t map;
  957. int frag_idx, err, map_idx = 0;
  958. struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
  959. int frag_cnt = skb_shinfo(skb)->nr_frags;
  960. if (frag_cnt) {
  961. QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
  962. }
  963. /*
  964. * Map the skb buffer first.
  965. */
  966. map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
  967. err = pci_dma_mapping_error(qdev->pdev, map);
  968. if (err) {
  969. QPRINTK(qdev, TX_QUEUED, ERR,
  970. "PCI mapping failed with error: %d\n", err);
  971. return NETDEV_TX_BUSY;
  972. }
  973. tbd->len = cpu_to_le32(len);
  974. tbd->addr = cpu_to_le64(map);
  975. pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
  976. pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
  977. map_idx++;
  978. /*
  979. * This loop fills the remainder of the 8 address descriptors
  980. * in the IOCB. If there are more than 7 fragments, then the
  981. * eighth address desc will point to an external list (OAL).
  982. * When this happens, the remainder of the frags will be stored
  983. * in this list.
  984. */
  985. for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
  986. skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
  987. tbd++;
  988. if (frag_idx == 6 && frag_cnt > 7) {
  989. /* Let's tack on an sglist.
  990. * Our control block will now
  991. * look like this:
  992. * iocb->seg[0] = skb->data
  993. * iocb->seg[1] = frag[0]
  994. * iocb->seg[2] = frag[1]
  995. * iocb->seg[3] = frag[2]
  996. * iocb->seg[4] = frag[3]
  997. * iocb->seg[5] = frag[4]
  998. * iocb->seg[6] = frag[5]
  999. * iocb->seg[7] = ptr to OAL (external sglist)
  1000. * oal->seg[0] = frag[6]
  1001. * oal->seg[1] = frag[7]
  1002. * oal->seg[2] = frag[8]
  1003. * oal->seg[3] = frag[9]
  1004. * oal->seg[4] = frag[10]
  1005. * etc...
  1006. */
  1007. /* Tack on the OAL in the eighth segment of IOCB. */
  1008. map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
  1009. sizeof(struct oal),
  1010. PCI_DMA_TODEVICE);
  1011. err = pci_dma_mapping_error(qdev->pdev, map);
  1012. if (err) {
  1013. QPRINTK(qdev, TX_QUEUED, ERR,
  1014. "PCI mapping outbound address list with error: %d\n",
  1015. err);
  1016. goto map_error;
  1017. }
  1018. tbd->addr = cpu_to_le64(map);
  1019. /*
  1020. * The length is the number of fragments
  1021. * that remain to be mapped times the length
  1022. * of our sglist (OAL).
  1023. */
  1024. tbd->len =
  1025. cpu_to_le32((sizeof(struct tx_buf_desc) *
  1026. (frag_cnt - frag_idx)) | TX_DESC_C);
  1027. pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
  1028. map);
  1029. pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
  1030. sizeof(struct oal));
  1031. tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
  1032. map_idx++;
  1033. }
  1034. map =
  1035. pci_map_page(qdev->pdev, frag->page,
  1036. frag->page_offset, frag->size,
  1037. PCI_DMA_TODEVICE);
  1038. err = pci_dma_mapping_error(qdev->pdev, map);
  1039. if (err) {
  1040. QPRINTK(qdev, TX_QUEUED, ERR,
  1041. "PCI mapping frags failed with error: %d.\n",
  1042. err);
  1043. goto map_error;
  1044. }
  1045. tbd->addr = cpu_to_le64(map);
  1046. tbd->len = cpu_to_le32(frag->size);
  1047. pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
  1048. pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
  1049. frag->size);
  1050. }
  1051. /* Save the number of segments we've mapped. */
  1052. tx_ring_desc->map_cnt = map_idx;
  1053. /* Terminate the last segment. */
  1054. tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
  1055. return NETDEV_TX_OK;
  1056. map_error:
  1057. /*
  1058. * If the first frag mapping failed, then i will be zero.
  1059. * This causes the unmap of the skb->data area. Otherwise
  1060. * we pass in the number of frags that mapped successfully
  1061. * so they can be unmapped.
  1062. */
  1063. ql_unmap_send(qdev, tx_ring_desc, map_idx);
  1064. return NETDEV_TX_BUSY;
  1065. }
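/*
 * Worked example for ql_map_send() above, assuming an skb with a linear
 * head and 10 page fragments (frag_cnt = 10 > 7):
 *
 *   iocb->seg[0]   = skb->data          map[0]    = head
 *   iocb->seg[1-6] = frag[0]..frag[5]   map[1-6]
 *   iocb->seg[7]   = OAL address        map[7]    = the OAL itself
 *   oal->seg[0-3]  = frag[6]..frag[9]   map[8-11]
 *
 * so tx_ring_desc->map_cnt ends up 12 (head + OAL + 10 frags), the OAL
 * pointer length carries TX_DESC_C, and the last segment mapped carries
 * the TX_DESC_E terminator.
 */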
  1066. static void ql_realign_skb(struct sk_buff *skb, int len)
  1067. {
  1068. void *temp_addr = skb->data;
  1069. /* Undo the skb_reserve(skb,32) we did before
  1070. * giving to hardware, and realign data on
  1071. * a 2-byte boundary.
  1072. */
  1073. skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
  1074. skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
  1075. skb_copy_to_linear_data(skb, temp_addr,
  1076. (unsigned int)len);
  1077. }
  1078. /*
  1079. * This function builds an skb for the given inbound
  1080. * completion. It will be rewritten for readability in the near
  1081. * future, but for now it works well.
  1082. */
  1083. static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
  1084. struct rx_ring *rx_ring,
  1085. struct ib_mac_iocb_rsp *ib_mac_rsp)
  1086. {
  1087. struct bq_desc *lbq_desc;
  1088. struct bq_desc *sbq_desc;
  1089. struct sk_buff *skb = NULL;
  1090. u32 length = le32_to_cpu(ib_mac_rsp->data_len);
  1091. u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
  1092. /*
  1093. * Handle the header buffer if present.
  1094. */
  1095. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
  1096. ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1097. QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
  1098. /*
  1099. * Headers fit nicely into a small buffer.
  1100. */
  1101. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1102. pci_unmap_single(qdev->pdev,
  1103. pci_unmap_addr(sbq_desc, mapaddr),
  1104. pci_unmap_len(sbq_desc, maplen),
  1105. PCI_DMA_FROMDEVICE);
  1106. skb = sbq_desc->p.skb;
  1107. ql_realign_skb(skb, hdr_len);
  1108. skb_put(skb, hdr_len);
  1109. sbq_desc->p.skb = NULL;
  1110. }
  1111. /*
  1112. * Handle the data buffer(s).
  1113. */
  1114. if (unlikely(!length)) { /* Is there data too? */
  1115. QPRINTK(qdev, RX_STATUS, DEBUG,
  1116. "No Data buffer in this packet.\n");
  1117. return skb;
  1118. }
  1119. if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
  1120. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1121. QPRINTK(qdev, RX_STATUS, DEBUG,
  1122. "Headers in small, data of %d bytes in small, combine them.\n", length);
  1123. /*
  1124. * Data is less than small buffer size so it's
  1125. * stuffed in a small buffer.
  1126. * For this case we append the data
  1127. * from the "data" small buffer to the "header" small
  1128. * buffer.
  1129. */
  1130. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1131. pci_dma_sync_single_for_cpu(qdev->pdev,
  1132. pci_unmap_addr
  1133. (sbq_desc, mapaddr),
  1134. pci_unmap_len
  1135. (sbq_desc, maplen),
  1136. PCI_DMA_FROMDEVICE);
  1137. memcpy(skb_put(skb, length),
  1138. sbq_desc->p.skb->data, length);
  1139. pci_dma_sync_single_for_device(qdev->pdev,
  1140. pci_unmap_addr
  1141. (sbq_desc,
  1142. mapaddr),
  1143. pci_unmap_len
  1144. (sbq_desc,
  1145. maplen),
  1146. PCI_DMA_FROMDEVICE);
  1147. } else {
  1148. QPRINTK(qdev, RX_STATUS, DEBUG,
  1149. "%d bytes in a single small buffer.\n", length);
  1150. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1151. skb = sbq_desc->p.skb;
  1152. ql_realign_skb(skb, length);
  1153. skb_put(skb, length);
  1154. pci_unmap_single(qdev->pdev,
  1155. pci_unmap_addr(sbq_desc,
  1156. mapaddr),
  1157. pci_unmap_len(sbq_desc,
  1158. maplen),
  1159. PCI_DMA_FROMDEVICE);
  1160. sbq_desc->p.skb = NULL;
  1161. }
  1162. } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
  1163. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1164. QPRINTK(qdev, RX_STATUS, DEBUG,
  1165. "Header in small, %d bytes in large. Chain large to small!\n", length);
  1166. /*
  1167. * The data is in a single large buffer. We
  1168. * chain it to the header buffer's skb and let
  1169. * it rip.
  1170. */
  1171. lbq_desc = ql_get_curr_lbuf(rx_ring);
  1172. pci_unmap_page(qdev->pdev,
  1173. pci_unmap_addr(lbq_desc,
  1174. mapaddr),
  1175. pci_unmap_len(lbq_desc, maplen),
  1176. PCI_DMA_FROMDEVICE);
  1177. QPRINTK(qdev, RX_STATUS, DEBUG,
  1178. "Chaining page to skb.\n");
  1179. skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
  1180. 0, length);
  1181. skb->len += length;
  1182. skb->data_len += length;
  1183. skb->truesize += length;
  1184. lbq_desc->p.lbq_page = NULL;
  1185. } else {
  1186. /*
  1187. * The headers and data are in a single large buffer. We
  1188. * copy it to a new skb and let it go. This can happen with
  1189. * jumbo mtu on a non-TCP/UDP frame.
  1190. */
  1191. lbq_desc = ql_get_curr_lbuf(rx_ring);
  1192. skb = netdev_alloc_skb(qdev->ndev, length);
  1193. if (skb == NULL) {
  1194. QPRINTK(qdev, PROBE, DEBUG,
  1195. "No skb available, drop the packet.\n");
  1196. return NULL;
  1197. }
  1198. pci_unmap_page(qdev->pdev,
  1199. pci_unmap_addr(lbq_desc,
  1200. mapaddr),
  1201. pci_unmap_len(lbq_desc, maplen),
  1202. PCI_DMA_FROMDEVICE);
  1203. skb_reserve(skb, NET_IP_ALIGN);
  1204. QPRINTK(qdev, RX_STATUS, DEBUG,
  1205. "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
  1206. skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
  1207. 0, length);
  1208. skb->len += length;
  1209. skb->data_len += length;
  1210. skb->truesize += length;
  1211. length -= length;
  1212. lbq_desc->p.lbq_page = NULL;
  1213. __pskb_pull_tail(skb,
  1214. (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
  1215. VLAN_ETH_HLEN : ETH_HLEN);
  1216. }
  1217. } else {
  1218. /*
  1219. * The data is in a chain of large buffers
  1220. * pointed to by a small buffer. We loop
1221. * through and chain them to our small header
1222. * buffer's skb.
1223. * frags: There are 18 max frags and our small
1224. * buffer will hold 32 of them. In practice
1225. * we use at most 3 for our 9000 byte jumbo
1226. * frames. If the MTU goes up we could
1227. * eventually be in trouble.
  1228. */
  1229. int size, offset, i = 0;
  1230. __le64 *bq, bq_array[8];
  1231. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1232. pci_unmap_single(qdev->pdev,
  1233. pci_unmap_addr(sbq_desc, mapaddr),
  1234. pci_unmap_len(sbq_desc, maplen),
  1235. PCI_DMA_FROMDEVICE);
  1236. if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
  1237. /*
1238. * This is a non-TCP/UDP IP frame, so
  1239. * the headers aren't split into a small
  1240. * buffer. We have to use the small buffer
  1241. * that contains our sg list as our skb to
  1242. * send upstairs. Copy the sg list here to
  1243. * a local buffer and use it to find the
  1244. * pages to chain.
  1245. */
  1246. QPRINTK(qdev, RX_STATUS, DEBUG,
  1247. "%d bytes of headers & data in chain of large.\n", length);
  1248. skb = sbq_desc->p.skb;
  1249. bq = &bq_array[0];
  1250. memcpy(bq, skb->data, sizeof(bq_array));
  1251. sbq_desc->p.skb = NULL;
  1252. skb_reserve(skb, NET_IP_ALIGN);
  1253. } else {
  1254. QPRINTK(qdev, RX_STATUS, DEBUG,
  1255. "Headers in small, %d bytes of data in chain of large.\n", length);
  1256. bq = (__le64 *)sbq_desc->p.skb->data;
  1257. }
  1258. while (length > 0) {
  1259. lbq_desc = ql_get_curr_lbuf(rx_ring);
  1260. pci_unmap_page(qdev->pdev,
  1261. pci_unmap_addr(lbq_desc,
  1262. mapaddr),
  1263. pci_unmap_len(lbq_desc,
  1264. maplen),
  1265. PCI_DMA_FROMDEVICE);
  1266. size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
  1267. offset = 0;
  1268. QPRINTK(qdev, RX_STATUS, DEBUG,
  1269. "Adding page %d to skb for %d bytes.\n",
  1270. i, size);
  1271. skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
  1272. offset, size);
  1273. skb->len += size;
  1274. skb->data_len += size;
  1275. skb->truesize += size;
  1276. length -= size;
  1277. lbq_desc->p.lbq_page = NULL;
  1278. bq++;
  1279. i++;
  1280. }
  1281. __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
  1282. VLAN_ETH_HLEN : ETH_HLEN);
  1283. }
  1284. return skb;
  1285. }
  1286. /* Process an inbound completion from an rx ring. */
  1287. static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
  1288. struct rx_ring *rx_ring,
  1289. struct ib_mac_iocb_rsp *ib_mac_rsp)
  1290. {
  1291. struct net_device *ndev = qdev->ndev;
  1292. struct sk_buff *skb = NULL;
  1293. QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
  1294. skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
  1295. if (unlikely(!skb)) {
  1296. QPRINTK(qdev, RX_STATUS, DEBUG,
  1297. "No skb available, drop packet.\n");
  1298. return;
  1299. }
  1300. prefetch(skb->data);
  1301. skb->dev = ndev;
  1302. if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
  1303. QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
  1304. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1305. IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
  1306. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1307. IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
  1308. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1309. IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
  1310. }
  1311. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
  1312. QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
  1313. }
  1314. if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
  1315. QPRINTK(qdev, RX_STATUS, ERR,
  1316. "Bad checksum for this %s packet.\n",
  1317. ((ib_mac_rsp->
  1318. flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
  1319. skb->ip_summed = CHECKSUM_NONE;
  1320. } else if (qdev->rx_csum &&
  1321. ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
  1322. ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
  1323. !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
  1324. QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
  1325. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1326. }
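/* CHECKSUM_UNNECESSARY tells the stack the hardware already verified the
 * TCP/UDP checksum; CHECKSUM_NONE (set above on IE/TE errors) leaves
 * verification to the stack in software.
 */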
  1327. qdev->stats.rx_packets++;
  1328. qdev->stats.rx_bytes += skb->len;
  1329. skb->protocol = eth_type_trans(skb, ndev);
  1330. skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
  1331. if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
  1332. QPRINTK(qdev, RX_STATUS, DEBUG,
  1333. "Passing a VLAN packet upstream.\n");
  1334. vlan_hwaccel_rx(skb, qdev->vlgrp,
  1335. le16_to_cpu(ib_mac_rsp->vlan_id));
  1336. } else {
  1337. QPRINTK(qdev, RX_STATUS, DEBUG,
  1338. "Passing a normal packet upstream.\n");
  1339. netif_rx(skb);
  1340. }
  1341. }
  1342. /* Process an outbound completion from an rx ring. */
  1343. static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
  1344. struct ob_mac_iocb_rsp *mac_rsp)
  1345. {
  1346. struct tx_ring *tx_ring;
  1347. struct tx_ring_desc *tx_ring_desc;
  1348. QL_DUMP_OB_MAC_RSP(mac_rsp);
  1349. tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
  1350. tx_ring_desc = &tx_ring->q[mac_rsp->tid];
  1351. ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1352. qdev->stats.tx_bytes += tx_ring_desc->skb->len;
  1353. qdev->stats.tx_packets++;
  1354. dev_kfree_skb(tx_ring_desc->skb);
  1355. tx_ring_desc->skb = NULL;
  1356. if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
  1357. OB_MAC_IOCB_RSP_S |
  1358. OB_MAC_IOCB_RSP_L |
  1359. OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
  1360. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
  1361. QPRINTK(qdev, TX_DONE, WARNING,
  1362. "Total descriptor length did not match transfer length.\n");
  1363. }
  1364. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
  1365. QPRINTK(qdev, TX_DONE, WARNING,
  1366. "Frame too short to be legal, not sent.\n");
  1367. }
  1368. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
  1369. QPRINTK(qdev, TX_DONE, WARNING,
  1370. "Frame too long, but sent anyway.\n");
  1371. }
  1372. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
  1373. QPRINTK(qdev, TX_DONE, WARNING,
  1374. "PCI backplane error. Frame not sent.\n");
  1375. }
  1376. }
  1377. atomic_inc(&tx_ring->tx_count);
  1378. }
  1379. /* Fire up a handler to reset the MPI processor. */
  1380. void ql_queue_fw_error(struct ql_adapter *qdev)
  1381. {
  1382. netif_stop_queue(qdev->ndev);
  1383. netif_carrier_off(qdev->ndev);
  1384. queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
  1385. }
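/* Fire up a handler to reset the chip after an ASIC error. Unlike the
 * firmware-error path above, this also disables interrupts before
 * queueing the reset worker.
 */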
  1386. void ql_queue_asic_error(struct ql_adapter *qdev)
  1387. {
  1388. netif_stop_queue(qdev->ndev);
  1389. netif_carrier_off(qdev->ndev);
  1390. ql_disable_interrupts(qdev);
  1391. queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
  1392. }
  1393. static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
  1394. struct ib_ae_iocb_rsp *ib_ae_rsp)
  1395. {
  1396. switch (ib_ae_rsp->event) {
  1397. case MGMT_ERR_EVENT:
  1398. QPRINTK(qdev, RX_ERR, ERR,
  1399. "Management Processor Fatal Error.\n");
  1400. ql_queue_fw_error(qdev);
  1401. return;
  1402. case CAM_LOOKUP_ERR_EVENT:
  1403. QPRINTK(qdev, LINK, ERR,
  1404. "Multiple CAM hits lookup occurred.\n");
  1405. QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
  1406. ql_queue_asic_error(qdev);
  1407. return;
  1408. case SOFT_ECC_ERROR_EVENT:
  1409. QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
  1410. ql_queue_asic_error(qdev);
  1411. break;
  1412. case PCI_ERR_ANON_BUF_RD:
  1413. QPRINTK(qdev, RX_ERR, ERR,
  1414. "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
  1415. ib_ae_rsp->q_id);
  1416. ql_queue_asic_error(qdev);
  1417. break;
  1418. default:
  1419. QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
  1420. ib_ae_rsp->event);
  1421. ql_queue_asic_error(qdev);
  1422. break;
  1423. }
  1424. }
  1425. static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
  1426. {
  1427. struct ql_adapter *qdev = rx_ring->qdev;
  1428. u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  1429. struct ob_mac_iocb_rsp *net_rsp = NULL;
  1430. int count = 0;
  1431. /* While there are entries in the completion queue. */
  1432. while (prod != rx_ring->cnsmr_idx) {
  1433. QPRINTK(qdev, RX_STATUS, DEBUG,
  1434. "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
  1435. prod, rx_ring->cnsmr_idx);
  1436. net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
  1437. rmb();
  1438. switch (net_rsp->opcode) {
  1439. case OPCODE_OB_MAC_TSO_IOCB:
  1440. case OPCODE_OB_MAC_IOCB:
  1441. ql_process_mac_tx_intr(qdev, net_rsp);
  1442. break;
  1443. default:
  1444. QPRINTK(qdev, RX_STATUS, DEBUG,
  1445. "Hit default case, not handled! dropping the packet, opcode = %x.\n",
  1446. net_rsp->opcode);
  1447. }
  1448. count++;
  1449. ql_update_cq(rx_ring);
  1450. prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  1451. }
  1452. ql_write_cq_idx(rx_ring);
  1453. if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
  1454. struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
  1455. if (atomic_read(&tx_ring->queue_stopped) &&
  1456. (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
  1457. /*
  1458. * The queue got stopped because the tx_ring was full.
  1459. * Wake it up, because it's now at least 25% empty.
  1460. */
  1461. netif_wake_queue(qdev->ndev);
  1462. }
  1463. return count;
  1464. }
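/* Service inbound completions on an rx ring, up to the NAPI budget.
 * Buffer queues are replenished and the consumer index is written back
 * to the chip before returning the amount of work done.
 */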
  1465. static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
  1466. {
  1467. struct ql_adapter *qdev = rx_ring->qdev;
  1468. u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  1469. struct ql_net_rsp_iocb *net_rsp;
  1470. int count = 0;
  1471. /* While there are entries in the completion queue. */
  1472. while (prod != rx_ring->cnsmr_idx) {
  1473. QPRINTK(qdev, RX_STATUS, DEBUG,
  1474. "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
  1475. prod, rx_ring->cnsmr_idx);
  1476. net_rsp = rx_ring->curr_entry;
  1477. rmb();
  1478. switch (net_rsp->opcode) {
  1479. case OPCODE_IB_MAC_IOCB:
  1480. ql_process_mac_rx_intr(qdev, rx_ring,
  1481. (struct ib_mac_iocb_rsp *)
  1482. net_rsp);
  1483. break;
  1484. case OPCODE_IB_AE_IOCB:
  1485. ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
  1486. net_rsp);
  1487. break;
  1488. default:
  1489. {
  1490. QPRINTK(qdev, RX_STATUS, DEBUG,
  1491. "Hit default case, not handled! dropping the packet, opcode = %x.\n",
  1492. net_rsp->opcode);
  1493. }
  1494. }
  1495. count++;
  1496. ql_update_cq(rx_ring);
  1497. prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  1498. if (count == budget)
  1499. break;
  1500. }
  1501. ql_update_buffer_queues(qdev, rx_ring);
  1502. ql_write_cq_idx(rx_ring);
  1503. return count;
  1504. }
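/* NAPI poll routine for the MSI-X RSS rings. If we used less than the
 * full budget the ring is idle, so complete NAPI and re-enable this
 * vector's completion interrupt.
 */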
  1505. static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
  1506. {
  1507. struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
  1508. struct ql_adapter *qdev = rx_ring->qdev;
  1509. int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
  1510. QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
  1511. rx_ring->cq_id);
  1512. if (work_done < budget) {
  1513. __napi_complete(napi);
  1514. ql_enable_completion_interrupt(qdev, rx_ring->irq);
  1515. }
  1516. return work_done;
  1517. }
  1518. static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
  1519. {
  1520. struct ql_adapter *qdev = netdev_priv(ndev);
  1521. qdev->vlgrp = grp;
  1522. if (grp) {
  1523. QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
  1524. ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
  1525. NIC_RCV_CFG_VLAN_MATCH_AND_NON);
  1526. } else {
  1527. QPRINTK(qdev, IFUP, DEBUG,
  1528. "Turning off VLAN in NIC_RCV_CFG.\n");
  1529. ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
  1530. }
  1531. }
  1532. static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
  1533. {
  1534. struct ql_adapter *qdev = netdev_priv(ndev);
  1535. u32 enable_bit = MAC_ADDR_E;
  1536. spin_lock(&qdev->hw_lock);
  1537. if (ql_set_mac_addr_reg
  1538. (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
  1539. QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
  1540. }
  1541. spin_unlock(&qdev->hw_lock);
  1542. }
  1543. static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
  1544. {
  1545. struct ql_adapter *qdev = netdev_priv(ndev);
  1546. u32 enable_bit = 0;
  1547. spin_lock(&qdev->hw_lock);
  1548. if (ql_set_mac_addr_reg
  1549. (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
  1550. QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
  1551. }
  1552. spin_unlock(&qdev->hw_lock);
  1553. }
  1554. /* Worker thread to process a given rx_ring that is dedicated
  1555. * to outbound completions.
  1556. */
  1557. static void ql_tx_clean(struct work_struct *work)
  1558. {
  1559. struct rx_ring *rx_ring =
  1560. container_of(work, struct rx_ring, rx_work.work);
  1561. ql_clean_outbound_rx_ring(rx_ring);
  1562. ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
  1563. }
  1564. /* Worker thread to process a given rx_ring that is dedicated
  1565. * to inbound completions.
  1566. */
  1567. static void ql_rx_clean(struct work_struct *work)
  1568. {
  1569. struct rx_ring *rx_ring =
  1570. container_of(work, struct rx_ring, rx_work.work);
  1571. ql_clean_inbound_rx_ring(rx_ring, 64);
  1572. ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
  1573. }
  1574. /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
  1575. static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
  1576. {
  1577. struct rx_ring *rx_ring = dev_id;
  1578. queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
  1579. &rx_ring->rx_work, 0);
  1580. return IRQ_HANDLED;
  1581. }
  1582. /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
  1583. static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
  1584. {
  1585. struct rx_ring *rx_ring = dev_id;
  1586. napi_schedule(&rx_ring->napi);
  1587. return IRQ_HANDLED;
  1588. }
  1589. /* This handles a fatal error, MPI activity, and the default
  1590. * rx_ring in an MSI-X multiple vector environment.
1591. * In an MSI/Legacy environment it also processes the rest of
  1592. * the rx_rings.
  1593. */
  1594. static irqreturn_t qlge_isr(int irq, void *dev_id)
  1595. {
  1596. struct rx_ring *rx_ring = dev_id;
  1597. struct ql_adapter *qdev = rx_ring->qdev;
  1598. struct intr_context *intr_context = &qdev->intr_context[0];
  1599. u32 var;
  1600. int i;
  1601. int work_done = 0;
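/* A non-zero irq_cnt means this vector's completion interrupt is
 * currently disabled (it is re-armed via ql_enable_completion_interrupt),
 * so a shared/legacy interrupt arriving now cannot be ours.
 */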
  1602. spin_lock(&qdev->hw_lock);
  1603. if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
  1604. QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
  1605. spin_unlock(&qdev->hw_lock);
  1606. return IRQ_NONE;
  1607. }
  1608. spin_unlock(&qdev->hw_lock);
  1609. var = ql_disable_completion_interrupt(qdev, intr_context->intr);
  1610. /*
  1611. * Check for fatal error.
  1612. */
  1613. if (var & STS_FE) {
  1614. ql_queue_asic_error(qdev);
  1615. QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
  1616. var = ql_read32(qdev, ERR_STS);
  1617. QPRINTK(qdev, INTR, ERR,
  1618. "Resetting chip. Error Status Register = 0x%x\n", var);
  1619. return IRQ_HANDLED;
  1620. }
  1621. /*
  1622. * Check MPI processor activity.
  1623. */
  1624. if (var & STS_PI) {
  1625. /*
  1626. * We've got an async event or mailbox completion.
  1627. * Handle it and clear the source of the interrupt.
  1628. */
  1629. QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
  1630. ql_disable_completion_interrupt(qdev, intr_context->intr);
  1631. queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
  1632. &qdev->mpi_work, 0);
  1633. work_done++;
  1634. }
  1635. /*
  1636. * Check the default queue and wake handler if active.
  1637. */
  1638. rx_ring = &qdev->rx_ring[0];
  1639. if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
  1640. QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
  1641. ql_disable_completion_interrupt(qdev, intr_context->intr);
  1642. queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
  1643. &rx_ring->rx_work, 0);
  1644. work_done++;
  1645. }
  1646. if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  1647. /*
  1648. * Start the DPC for each active queue.
  1649. */
  1650. for (i = 1; i < qdev->rx_ring_count; i++) {
  1651. rx_ring = &qdev->rx_ring[i];
  1652. if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
  1653. rx_ring->cnsmr_idx) {
  1654. QPRINTK(qdev, INTR, INFO,
  1655. "Waking handler for rx_ring[%d].\n", i);
  1656. ql_disable_completion_interrupt(qdev,
  1657. intr_context->
  1658. intr);
  1659. if (i < qdev->rss_ring_first_cq_id)
  1660. queue_delayed_work_on(rx_ring->cpu,
  1661. qdev->q_workqueue,
  1662. &rx_ring->rx_work,
  1663. 0);
  1664. else
  1665. napi_schedule(&rx_ring->napi);
  1666. work_done++;
  1667. }
  1668. }
  1669. }
  1670. ql_enable_completion_interrupt(qdev, intr_context->intr);
  1671. return work_done ? IRQ_HANDLED : IRQ_NONE;
  1672. }
  1673. static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
  1674. {
  1675. if (skb_is_gso(skb)) {
  1676. int err;
  1677. if (skb_header_cloned(skb)) {
  1678. err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  1679. if (err)
  1680. return err;
  1681. }
  1682. mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
  1683. mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
  1684. mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
  1685. mac_iocb_ptr->total_hdrs_len =
  1686. cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
  1687. mac_iocb_ptr->net_trans_offset =
  1688. cpu_to_le16(skb_network_offset(skb) |
  1689. skb_transport_offset(skb)
  1690. << OB_MAC_TRANSPORT_HDR_SHIFT);
  1691. mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
  1692. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
  1693. if (likely(skb->protocol == htons(ETH_P_IP))) {
  1694. struct iphdr *iph = ip_hdr(skb);
  1695. iph->check = 0;
  1696. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
  1697. tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
  1698. iph->daddr, 0,
  1699. IPPROTO_TCP,
  1700. 0);
  1701. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  1702. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
  1703. tcp_hdr(skb)->check =
  1704. ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  1705. &ipv6_hdr(skb)->daddr,
  1706. 0, IPPROTO_TCP, 0);
  1707. }
  1708. return 1;
  1709. }
  1710. return 0;
  1711. }
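/* Set up TCP/UDP checksum offload for a non-GSO CHECKSUM_PARTIAL IPv4
 * frame: seed the checksum field with the pseudo-header sum and give the
 * chip the header offsets it needs to insert the final checksum.
 */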
  1712. static void ql_hw_csum_setup(struct sk_buff *skb,
  1713. struct ob_mac_tso_iocb_req *mac_iocb_ptr)
  1714. {
  1715. int len;
  1716. struct iphdr *iph = ip_hdr(skb);
  1717. __sum16 *check;
  1718. mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
  1719. mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
  1720. mac_iocb_ptr->net_trans_offset =
  1721. cpu_to_le16(skb_network_offset(skb) |
  1722. skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
  1723. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
  1724. len = (ntohs(iph->tot_len) - (iph->ihl << 2));
  1725. if (likely(iph->protocol == IPPROTO_TCP)) {
  1726. check = &(tcp_hdr(skb)->check);
  1727. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
  1728. mac_iocb_ptr->total_hdrs_len =
  1729. cpu_to_le16(skb_transport_offset(skb) +
  1730. (tcp_hdr(skb)->doff << 2));
  1731. } else {
  1732. check = &(udp_hdr(skb)->check);
  1733. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
  1734. mac_iocb_ptr->total_hdrs_len =
  1735. cpu_to_le16(skb_transport_offset(skb) +
  1736. sizeof(struct udphdr));
  1737. }
  1738. *check = ~csum_tcpudp_magic(iph->saddr,
  1739. iph->daddr, len, iph->protocol, 0);
  1740. }
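/* Transmit entry point. Pick a tx ring, build and map the MAC IOCB
 * (applying TSO or checksum offload as needed), then advance the
 * producer index and ring the doorbell.
 */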
  1741. static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
  1742. {
  1743. struct tx_ring_desc *tx_ring_desc;
  1744. struct ob_mac_iocb_req *mac_iocb_ptr;
  1745. struct ql_adapter *qdev = netdev_priv(ndev);
  1746. int tso;
  1747. struct tx_ring *tx_ring;
  1748. u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
  1749. tx_ring = &qdev->tx_ring[tx_ring_idx];
  1750. if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
  1751. QPRINTK(qdev, TX_QUEUED, INFO,
  1752. "%s: shutting down tx queue %d du to lack of resources.\n",
  1753. __func__, tx_ring_idx);
  1754. netif_stop_queue(ndev);
  1755. atomic_inc(&tx_ring->queue_stopped);
  1756. return NETDEV_TX_BUSY;
  1757. }
  1758. tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
  1759. mac_iocb_ptr = tx_ring_desc->queue_entry;
1760. memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
  1761. if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
  1762. QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
  1763. return NETDEV_TX_BUSY;
  1764. }
  1765. mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
  1766. mac_iocb_ptr->tid = tx_ring_desc->index;
  1767. /* We use the upper 32-bits to store the tx queue for this IO.
  1768. * When we get the completion we can use it to establish the context.
  1769. */
  1770. mac_iocb_ptr->txq_idx = tx_ring_idx;
  1771. tx_ring_desc->skb = skb;
  1772. mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
  1773. if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
  1774. QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
  1775. vlan_tx_tag_get(skb));
  1776. mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
  1777. mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
  1778. }
  1779. tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
  1780. if (tso < 0) {
  1781. dev_kfree_skb_any(skb);
  1782. return NETDEV_TX_OK;
  1783. } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
  1784. ql_hw_csum_setup(skb,
  1785. (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
  1786. }
  1787. QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
  1788. tx_ring->prod_idx++;
  1789. if (tx_ring->prod_idx == tx_ring->wq_len)
  1790. tx_ring->prod_idx = 0;
  1791. wmb();
  1792. ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
  1793. ndev->trans_start = jiffies;
  1794. QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
  1795. tx_ring->prod_idx, skb->len);
  1796. atomic_dec(&tx_ring->tx_count);
  1797. return NETDEV_TX_OK;
  1798. }
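/* The shadow register areas are single DMA-coherent pages shared with
 * the chip: the rx page holds each ring's completion producer index
 * (updated by the chip) plus its lbq/sbq base-address indirection
 * entries, while the tx page holds each tx ring's consumer index.
 * They are carved up per ring in ql_start_rx_ring/ql_start_tx_ring.
 */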
  1799. static void ql_free_shadow_space(struct ql_adapter *qdev)
  1800. {
  1801. if (qdev->rx_ring_shadow_reg_area) {
  1802. pci_free_consistent(qdev->pdev,
  1803. PAGE_SIZE,
  1804. qdev->rx_ring_shadow_reg_area,
  1805. qdev->rx_ring_shadow_reg_dma);
  1806. qdev->rx_ring_shadow_reg_area = NULL;
  1807. }
  1808. if (qdev->tx_ring_shadow_reg_area) {
  1809. pci_free_consistent(qdev->pdev,
  1810. PAGE_SIZE,
  1811. qdev->tx_ring_shadow_reg_area,
  1812. qdev->tx_ring_shadow_reg_dma);
  1813. qdev->tx_ring_shadow_reg_area = NULL;
  1814. }
  1815. }
  1816. static int ql_alloc_shadow_space(struct ql_adapter *qdev)
  1817. {
  1818. qdev->rx_ring_shadow_reg_area =
  1819. pci_alloc_consistent(qdev->pdev,
  1820. PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
  1821. if (qdev->rx_ring_shadow_reg_area == NULL) {
  1822. QPRINTK(qdev, IFUP, ERR,
  1823. "Allocation of RX shadow space failed.\n");
  1824. return -ENOMEM;
  1825. }
  1826. qdev->tx_ring_shadow_reg_area =
  1827. pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
  1828. &qdev->tx_ring_shadow_reg_dma);
  1829. if (qdev->tx_ring_shadow_reg_area == NULL) {
  1830. QPRINTK(qdev, IFUP, ERR,
  1831. "Allocation of TX shadow space failed.\n");
  1832. goto err_wqp_sh_area;
  1833. }
  1834. return 0;
  1835. err_wqp_sh_area:
  1836. pci_free_consistent(qdev->pdev,
  1837. PAGE_SIZE,
  1838. qdev->rx_ring_shadow_reg_area,
  1839. qdev->rx_ring_shadow_reg_dma);
  1840. return -ENOMEM;
  1841. }
  1842. static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
  1843. {
  1844. struct tx_ring_desc *tx_ring_desc;
  1845. int i;
  1846. struct ob_mac_iocb_req *mac_iocb_ptr;
  1847. mac_iocb_ptr = tx_ring->wq_base;
  1848. tx_ring_desc = tx_ring->q;
  1849. for (i = 0; i < tx_ring->wq_len; i++) {
  1850. tx_ring_desc->index = i;
  1851. tx_ring_desc->skb = NULL;
  1852. tx_ring_desc->queue_entry = mac_iocb_ptr;
  1853. mac_iocb_ptr++;
  1854. tx_ring_desc++;
  1855. }
  1856. atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
  1857. atomic_set(&tx_ring->queue_stopped, 0);
  1858. }
  1859. static void ql_free_tx_resources(struct ql_adapter *qdev,
  1860. struct tx_ring *tx_ring)
  1861. {
  1862. if (tx_ring->wq_base) {
  1863. pci_free_consistent(qdev->pdev, tx_ring->wq_size,
  1864. tx_ring->wq_base, tx_ring->wq_base_dma);
  1865. tx_ring->wq_base = NULL;
  1866. }
  1867. kfree(tx_ring->q);
  1868. tx_ring->q = NULL;
  1869. }
  1870. static int ql_alloc_tx_resources(struct ql_adapter *qdev,
  1871. struct tx_ring *tx_ring)
  1872. {
  1873. tx_ring->wq_base =
  1874. pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
  1875. &tx_ring->wq_base_dma);
  1876. if ((tx_ring->wq_base == NULL)
  1877. || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
  1878. QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
  1879. return -ENOMEM;
  1880. }
  1881. tx_ring->q =
  1882. kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
  1883. if (tx_ring->q == NULL)
  1884. goto err;
  1885. return 0;
  1886. err:
  1887. pci_free_consistent(qdev->pdev, tx_ring->wq_size,
  1888. tx_ring->wq_base, tx_ring->wq_base_dma);
  1889. return -ENOMEM;
  1890. }
  1891. static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  1892. {
  1893. int i;
  1894. struct bq_desc *lbq_desc;
  1895. for (i = 0; i < rx_ring->lbq_len; i++) {
  1896. lbq_desc = &rx_ring->lbq[i];
  1897. if (lbq_desc->p.lbq_page) {
  1898. pci_unmap_page(qdev->pdev,
  1899. pci_unmap_addr(lbq_desc, mapaddr),
  1900. pci_unmap_len(lbq_desc, maplen),
  1901. PCI_DMA_FROMDEVICE);
  1902. put_page(lbq_desc->p.lbq_page);
  1903. lbq_desc->p.lbq_page = NULL;
  1904. }
  1905. }
  1906. }
  1907. /*
  1908. * Allocate and map a page for each element of the lbq.
  1909. */
  1910. static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
  1911. struct rx_ring *rx_ring)
  1912. {
  1913. int i;
  1914. struct bq_desc *lbq_desc;
  1915. u64 map;
  1916. __le64 *bq = rx_ring->lbq_base;
  1917. for (i = 0; i < rx_ring->lbq_len; i++) {
  1918. lbq_desc = &rx_ring->lbq[i];
1919. memset(lbq_desc, 0, sizeof(*lbq_desc));
  1920. lbq_desc->addr = bq;
  1921. lbq_desc->index = i;
  1922. lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
  1923. if (unlikely(!lbq_desc->p.lbq_page)) {
  1924. QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
  1925. goto mem_error;
  1926. } else {
  1927. map = pci_map_page(qdev->pdev,
  1928. lbq_desc->p.lbq_page,
  1929. 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
  1930. if (pci_dma_mapping_error(qdev->pdev, map)) {
  1931. QPRINTK(qdev, IFUP, ERR,
  1932. "PCI mapping failed.\n");
  1933. goto mem_error;
  1934. }
  1935. pci_unmap_addr_set(lbq_desc, mapaddr, map);
  1936. pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
  1937. *lbq_desc->addr = cpu_to_le64(map);
  1938. }
  1939. bq++;
  1940. }
  1941. return 0;
  1942. mem_error:
  1943. ql_free_lbq_buffers(qdev, rx_ring);
  1944. return -ENOMEM;
  1945. }
  1946. static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  1947. {
  1948. int i;
  1949. struct bq_desc *sbq_desc;
  1950. for (i = 0; i < rx_ring->sbq_len; i++) {
  1951. sbq_desc = &rx_ring->sbq[i];
  1952. if (sbq_desc == NULL) {
  1953. QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
  1954. return;
  1955. }
  1956. if (sbq_desc->p.skb) {
  1957. pci_unmap_single(qdev->pdev,
  1958. pci_unmap_addr(sbq_desc, mapaddr),
  1959. pci_unmap_len(sbq_desc, maplen),
  1960. PCI_DMA_FROMDEVICE);
  1961. dev_kfree_skb(sbq_desc->p.skb);
  1962. sbq_desc->p.skb = NULL;
  1963. }
  1964. }
  1965. }
  1966. /* Allocate and map an skb for each element of the sbq. */
  1967. static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
  1968. struct rx_ring *rx_ring)
  1969. {
  1970. int i;
  1971. struct bq_desc *sbq_desc;
  1972. struct sk_buff *skb;
  1973. u64 map;
  1974. __le64 *bq = rx_ring->sbq_base;
  1975. for (i = 0; i < rx_ring->sbq_len; i++) {
  1976. sbq_desc = &rx_ring->sbq[i];
1977. memset(sbq_desc, 0, sizeof(*sbq_desc));
  1978. sbq_desc->index = i;
  1979. sbq_desc->addr = bq;
  1980. skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
  1981. if (unlikely(!skb)) {
  1982. /* Better luck next round */
  1983. QPRINTK(qdev, IFUP, ERR,
  1984. "small buff alloc failed for %d bytes at index %d.\n",
  1985. rx_ring->sbq_buf_size, i);
  1986. goto mem_err;
  1987. }
  1988. skb_reserve(skb, QLGE_SB_PAD);
  1989. sbq_desc->p.skb = skb;
  1990. /*
  1991. * Map only half the buffer. Because the
  1992. * other half may get some data copied to it
  1993. * when the completion arrives.
  1994. */
  1995. map = pci_map_single(qdev->pdev,
  1996. skb->data,
  1997. rx_ring->sbq_buf_size / 2,
  1998. PCI_DMA_FROMDEVICE);
  1999. if (pci_dma_mapping_error(qdev->pdev, map)) {
  2000. QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
  2001. goto mem_err;
  2002. }
  2003. pci_unmap_addr_set(sbq_desc, mapaddr, map);
  2004. pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
  2005. *sbq_desc->addr = cpu_to_le64(map);
  2006. bq++;
  2007. }
  2008. return 0;
  2009. mem_err:
  2010. ql_free_sbq_buffers(qdev, rx_ring);
  2011. return -ENOMEM;
  2012. }
  2013. static void ql_free_rx_resources(struct ql_adapter *qdev,
  2014. struct rx_ring *rx_ring)
  2015. {
  2016. if (rx_ring->sbq_len)
  2017. ql_free_sbq_buffers(qdev, rx_ring);
  2018. if (rx_ring->lbq_len)
  2019. ql_free_lbq_buffers(qdev, rx_ring);
  2020. /* Free the small buffer queue. */
  2021. if (rx_ring->sbq_base) {
  2022. pci_free_consistent(qdev->pdev,
  2023. rx_ring->sbq_size,
  2024. rx_ring->sbq_base, rx_ring->sbq_base_dma);
  2025. rx_ring->sbq_base = NULL;
  2026. }
  2027. /* Free the small buffer queue control blocks. */
  2028. kfree(rx_ring->sbq);
  2029. rx_ring->sbq = NULL;
  2030. /* Free the large buffer queue. */
  2031. if (rx_ring->lbq_base) {
  2032. pci_free_consistent(qdev->pdev,
  2033. rx_ring->lbq_size,
  2034. rx_ring->lbq_base, rx_ring->lbq_base_dma);
  2035. rx_ring->lbq_base = NULL;
  2036. }
  2037. /* Free the large buffer queue control blocks. */
  2038. kfree(rx_ring->lbq);
  2039. rx_ring->lbq = NULL;
  2040. /* Free the rx queue. */
  2041. if (rx_ring->cq_base) {
  2042. pci_free_consistent(qdev->pdev,
  2043. rx_ring->cq_size,
  2044. rx_ring->cq_base, rx_ring->cq_base_dma);
  2045. rx_ring->cq_base = NULL;
  2046. }
  2047. }
2048. /* Allocate queues and buffers for this completion queue based
  2049. * on the values in the parameter structure. */
  2050. static int ql_alloc_rx_resources(struct ql_adapter *qdev,
  2051. struct rx_ring *rx_ring)
  2052. {
  2053. /*
  2054. * Allocate the completion queue for this rx_ring.
  2055. */
  2056. rx_ring->cq_base =
  2057. pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
  2058. &rx_ring->cq_base_dma);
  2059. if (rx_ring->cq_base == NULL) {
  2060. QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
  2061. return -ENOMEM;
  2062. }
  2063. if (rx_ring->sbq_len) {
  2064. /*
  2065. * Allocate small buffer queue.
  2066. */
  2067. rx_ring->sbq_base =
  2068. pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
  2069. &rx_ring->sbq_base_dma);
  2070. if (rx_ring->sbq_base == NULL) {
  2071. QPRINTK(qdev, IFUP, ERR,
  2072. "Small buffer queue allocation failed.\n");
  2073. goto err_mem;
  2074. }
  2075. /*
  2076. * Allocate small buffer queue control blocks.
  2077. */
  2078. rx_ring->sbq =
  2079. kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
  2080. GFP_KERNEL);
  2081. if (rx_ring->sbq == NULL) {
  2082. QPRINTK(qdev, IFUP, ERR,
  2083. "Small buffer queue control block allocation failed.\n");
  2084. goto err_mem;
  2085. }
  2086. if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
  2087. QPRINTK(qdev, IFUP, ERR,
  2088. "Small buffer allocation failed.\n");
  2089. goto err_mem;
  2090. }
  2091. }
  2092. if (rx_ring->lbq_len) {
  2093. /*
  2094. * Allocate large buffer queue.
  2095. */
  2096. rx_ring->lbq_base =
  2097. pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
  2098. &rx_ring->lbq_base_dma);
  2099. if (rx_ring->lbq_base == NULL) {
  2100. QPRINTK(qdev, IFUP, ERR,
  2101. "Large buffer queue allocation failed.\n");
  2102. goto err_mem;
  2103. }
  2104. /*
  2105. * Allocate large buffer queue control blocks.
  2106. */
  2107. rx_ring->lbq =
  2108. kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
  2109. GFP_KERNEL);
  2110. if (rx_ring->lbq == NULL) {
  2111. QPRINTK(qdev, IFUP, ERR,
  2112. "Large buffer queue control block allocation failed.\n");
  2113. goto err_mem;
  2114. }
  2115. /*
  2116. * Allocate the buffers.
  2117. */
  2118. if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
  2119. QPRINTK(qdev, IFUP, ERR,
  2120. "Large buffer allocation failed.\n");
  2121. goto err_mem;
  2122. }
  2123. }
  2124. return 0;
  2125. err_mem:
  2126. ql_free_rx_resources(qdev, rx_ring);
  2127. return -ENOMEM;
  2128. }
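/* Release any skbs still held by tx descriptors. Called when the
 * adapter goes down so in-flight sends that never completed are
 * unmapped and freed rather than leaked.
 */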
  2129. static void ql_tx_ring_clean(struct ql_adapter *qdev)
  2130. {
  2131. struct tx_ring *tx_ring;
  2132. struct tx_ring_desc *tx_ring_desc;
  2133. int i, j;
  2134. /*
  2135. * Loop through all queues and free
  2136. * any resources.
  2137. */
  2138. for (j = 0; j < qdev->tx_ring_count; j++) {
  2139. tx_ring = &qdev->tx_ring[j];
  2140. for (i = 0; i < tx_ring->wq_len; i++) {
  2141. tx_ring_desc = &tx_ring->q[i];
  2142. if (tx_ring_desc && tx_ring_desc->skb) {
  2143. QPRINTK(qdev, IFDOWN, ERR,
  2144. "Freeing lost SKB %p, from queue %d, index %d.\n",
  2145. tx_ring_desc->skb, j,
  2146. tx_ring_desc->index);
  2147. ql_unmap_send(qdev, tx_ring_desc,
  2148. tx_ring_desc->map_cnt);
  2149. dev_kfree_skb(tx_ring_desc->skb);
  2150. tx_ring_desc->skb = NULL;
  2151. }
  2152. }
  2153. }
  2154. }
  2155. static void ql_free_mem_resources(struct ql_adapter *qdev)
  2156. {
  2157. int i;
  2158. for (i = 0; i < qdev->tx_ring_count; i++)
  2159. ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
  2160. for (i = 0; i < qdev->rx_ring_count; i++)
  2161. ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
  2162. ql_free_shadow_space(qdev);
  2163. }
  2164. static int ql_alloc_mem_resources(struct ql_adapter *qdev)
  2165. {
  2166. int i;
  2167. /* Allocate space for our shadow registers and such. */
  2168. if (ql_alloc_shadow_space(qdev))
  2169. return -ENOMEM;
  2170. for (i = 0; i < qdev->rx_ring_count; i++) {
  2171. if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
  2172. QPRINTK(qdev, IFUP, ERR,
  2173. "RX resource allocation failed.\n");
  2174. goto err_mem;
  2175. }
  2176. }
  2177. /* Allocate tx queue resources */
  2178. for (i = 0; i < qdev->tx_ring_count; i++) {
  2179. if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
  2180. QPRINTK(qdev, IFUP, ERR,
  2181. "TX resource allocation failed.\n");
  2182. goto err_mem;
  2183. }
  2184. }
  2185. return 0;
  2186. err_mem:
  2187. ql_free_mem_resources(qdev);
  2188. return -ENOMEM;
  2189. }
  2190. /* Set up the rx ring control block and pass it to the chip.
  2191. * The control block is defined as
  2192. * "Completion Queue Initialization Control Block", or cqicb.
  2193. */
  2194. static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  2195. {
  2196. struct cqicb *cqicb = &rx_ring->cqicb;
  2197. void *shadow_reg = qdev->rx_ring_shadow_reg_area +
  2198. (rx_ring->cq_id * sizeof(u64) * 4);
  2199. u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
  2200. (rx_ring->cq_id * sizeof(u64) * 4);
  2201. void __iomem *doorbell_area =
  2202. qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
  2203. int err = 0;
  2204. u16 bq_len;
  2205. /* Set up the shadow registers for this ring. */
  2206. rx_ring->prod_idx_sh_reg = shadow_reg;
  2207. rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
  2208. shadow_reg += sizeof(u64);
  2209. shadow_reg_dma += sizeof(u64);
  2210. rx_ring->lbq_base_indirect = shadow_reg;
  2211. rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
  2212. shadow_reg += sizeof(u64);
  2213. shadow_reg_dma += sizeof(u64);
  2214. rx_ring->sbq_base_indirect = shadow_reg;
  2215. rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
  2216. /* PCI doorbell mem area + 0x00 for consumer index register */
  2217. rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
  2218. rx_ring->cnsmr_idx = 0;
  2219. rx_ring->curr_entry = rx_ring->cq_base;
  2220. /* PCI doorbell mem area + 0x04 for valid register */
  2221. rx_ring->valid_db_reg = doorbell_area + 0x04;
  2222. /* PCI doorbell mem area + 0x18 for large buffer consumer */
  2223. rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
  2224. /* PCI doorbell mem area + 0x1c */
  2225. rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
  2226. memset((void *)cqicb, 0, sizeof(struct cqicb));
  2227. cqicb->msix_vect = rx_ring->irq;
  2228. bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
  2229. cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
  2230. cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
  2231. cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
  2232. /*
  2233. * Set up the control block load flags.
  2234. */
  2235. cqicb->flags = FLAGS_LC | /* Load queue base address */
  2236. FLAGS_LV | /* Load MSI-X vector */
  2237. FLAGS_LI; /* Load irq delay values */
  2238. if (rx_ring->lbq_len) {
  2239. cqicb->flags |= FLAGS_LL; /* Load lbq values */
  2240. *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
  2241. cqicb->lbq_addr =
  2242. cpu_to_le64(rx_ring->lbq_base_indirect_dma);
  2243. bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
  2244. (u16) rx_ring->lbq_buf_size;
  2245. cqicb->lbq_buf_size = cpu_to_le16(bq_len);
  2246. bq_len = (rx_ring->lbq_len == 65536) ? 0 :
  2247. (u16) rx_ring->lbq_len;
  2248. cqicb->lbq_len = cpu_to_le16(bq_len);
  2249. rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
  2250. rx_ring->lbq_curr_idx = 0;
  2251. rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
  2252. rx_ring->lbq_free_cnt = 16;
  2253. }
  2254. if (rx_ring->sbq_len) {
  2255. cqicb->flags |= FLAGS_LS; /* Load sbq values */
  2256. *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
  2257. cqicb->sbq_addr =
  2258. cpu_to_le64(rx_ring->sbq_base_indirect_dma);
  2259. cqicb->sbq_buf_size =
  2260. cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
  2261. bq_len = (rx_ring->sbq_len == 65536) ? 0 :
  2262. (u16) rx_ring->sbq_len;
  2263. cqicb->sbq_len = cpu_to_le16(bq_len);
  2264. rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
  2265. rx_ring->sbq_curr_idx = 0;
  2266. rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
  2267. rx_ring->sbq_free_cnt = 16;
  2268. }
  2269. switch (rx_ring->type) {
  2270. case TX_Q:
  2271. /* If there's only one interrupt, then we use
  2272. * worker threads to process the outbound
  2273. * completion handling rx_rings. We do this so
  2274. * they can be run on multiple CPUs. There is
2275. * room to improve this: we could defer to a
2276. * worker only when more than some threshold of
2277. * outbound completions are pending on the queue
2278. * and more than one queue is active, i.e. when
2279. * the benefit outweighs the cost of a context
2280. * switch.
  2281. * If there's more than one interrupt, then the
  2282. * outbound completions are processed in the ISR.
  2283. */
  2284. if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
  2285. INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
  2286. else {
  2287. /* With all debug warnings on we see a WARN_ON message
  2288. * when we free the skb in the interrupt context.
  2289. */
  2290. INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
  2291. }
  2292. cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
  2293. cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
  2294. break;
  2295. case DEFAULT_Q:
  2296. INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
  2297. cqicb->irq_delay = 0;
  2298. cqicb->pkt_delay = 0;
  2299. break;
  2300. case RX_Q:
  2301. /* Inbound completion handling rx_rings run in
  2302. * separate NAPI contexts.
  2303. */
  2304. netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
  2305. 64);
  2306. cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
  2307. cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
  2308. break;
  2309. default:
  2310. QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
  2311. rx_ring->type);
  2312. }
  2313. QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
  2314. err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
  2315. CFG_LCQ, rx_ring->cq_id);
  2316. if (err) {
  2317. QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
  2318. return err;
  2319. }
  2320. QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
  2321. /*
  2322. * Advance the producer index for the buffer queues.
  2323. */
  2324. wmb();
  2325. if (rx_ring->lbq_len)
  2326. ql_write_db_reg(rx_ring->lbq_prod_idx,
  2327. rx_ring->lbq_prod_idx_db_reg);
  2328. if (rx_ring->sbq_len)
  2329. ql_write_db_reg(rx_ring->sbq_prod_idx,
  2330. rx_ring->sbq_prod_idx_db_reg);
  2331. return err;
  2332. }
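/* Set up the tx ring control block (wqicb), assign its doorbell and
 * shadow registers, and download it to the chip.
 */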
  2333. static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
  2334. {
  2335. struct wqicb *wqicb = (struct wqicb *)tx_ring;
  2336. void __iomem *doorbell_area =
  2337. qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
  2338. void *shadow_reg = qdev->tx_ring_shadow_reg_area +
  2339. (tx_ring->wq_id * sizeof(u64));
  2340. u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
  2341. (tx_ring->wq_id * sizeof(u64));
  2342. int err = 0;
  2343. /*
  2344. * Assign doorbell registers for this tx_ring.
  2345. */
  2346. /* TX PCI doorbell mem area for tx producer index */
  2347. tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
  2348. tx_ring->prod_idx = 0;
  2349. /* TX PCI doorbell mem area + 0x04 */
  2350. tx_ring->valid_db_reg = doorbell_area + 0x04;
  2351. /*
  2352. * Assign shadow registers for this tx_ring.
  2353. */
  2354. tx_ring->cnsmr_idx_sh_reg = shadow_reg;
  2355. tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
  2356. wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
  2357. wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
  2358. Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
  2359. wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
  2360. wqicb->rid = 0;
  2361. wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
  2362. wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
  2363. ql_init_tx_ring(qdev, tx_ring);
2364. err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
  2365. (u16) tx_ring->wq_id);
  2366. if (err) {
  2367. QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
  2368. return err;
  2369. }
  2370. QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
  2371. return err;
  2372. }
  2373. static void ql_disable_msix(struct ql_adapter *qdev)
  2374. {
  2375. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  2376. pci_disable_msix(qdev->pdev);
  2377. clear_bit(QL_MSIX_ENABLED, &qdev->flags);
  2378. kfree(qdev->msi_x_entry);
  2379. qdev->msi_x_entry = NULL;
  2380. } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
  2381. pci_disable_msi(qdev->pdev);
  2382. clear_bit(QL_MSI_ENABLED, &qdev->flags);
  2383. }
  2384. }
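/* Try to enable MSI-X with one vector per rx_ring; fall back to MSI and
 * finally to legacy INTx if that fails. qdev->intr_count ends up
 * reflecting how many vectors we actually got.
 */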
  2385. static void ql_enable_msix(struct ql_adapter *qdev)
  2386. {
  2387. int i;
  2388. qdev->intr_count = 1;
  2389. /* Get the MSIX vectors. */
  2390. if (irq_type == MSIX_IRQ) {
  2391. /* Try to alloc space for the msix struct,
  2392. * if it fails then go to MSI/legacy.
  2393. */
  2394. qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
  2395. sizeof(struct msix_entry),
  2396. GFP_KERNEL);
  2397. if (!qdev->msi_x_entry) {
  2398. irq_type = MSI_IRQ;
  2399. goto msi;
  2400. }
  2401. for (i = 0; i < qdev->rx_ring_count; i++)
  2402. qdev->msi_x_entry[i].entry = i;
  2403. if (!pci_enable_msix
  2404. (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
  2405. set_bit(QL_MSIX_ENABLED, &qdev->flags);
  2406. qdev->intr_count = qdev->rx_ring_count;
  2407. QPRINTK(qdev, IFUP, INFO,
  2408. "MSI-X Enabled, got %d vectors.\n",
  2409. qdev->intr_count);
  2410. return;
  2411. } else {
  2412. kfree(qdev->msi_x_entry);
  2413. qdev->msi_x_entry = NULL;
  2414. QPRINTK(qdev, IFUP, WARNING,
  2415. "MSI-X Enable failed, trying MSI.\n");
  2416. irq_type = MSI_IRQ;
  2417. }
  2418. }
  2419. msi:
  2420. if (irq_type == MSI_IRQ) {
  2421. if (!pci_enable_msi(qdev->pdev)) {
  2422. set_bit(QL_MSI_ENABLED, &qdev->flags);
  2423. QPRINTK(qdev, IFUP, INFO,
  2424. "Running with MSI interrupts.\n");
  2425. return;
  2426. }
  2427. }
  2428. irq_type = LEG_IRQ;
  2429. QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
  2430. }
  2431. /*
  2432. * Here we build the intr_context structures based on
  2433. * our rx_ring count and intr vector count.
  2434. * The intr_context structure is used to hook each vector
  2435. * to possibly different handlers.
  2436. */
  2437. static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
  2438. {
  2439. int i = 0;
  2440. struct intr_context *intr_context = &qdev->intr_context[0];
  2441. ql_enable_msix(qdev);
  2442. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2443. /* Each rx_ring has its
2444. * own intr_context since we have separate
2445. * vectors for each queue.
2446. * This is only true when MSI-X is enabled.
  2447. */
  2448. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  2449. qdev->rx_ring[i].irq = i;
  2450. intr_context->intr = i;
  2451. intr_context->qdev = qdev;
  2452. /*
2453. * We set up each vector's enable/disable/read bits so
2454. * there are no bit/mask calculations in the critical path.
  2455. */
  2456. intr_context->intr_en_mask =
  2457. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  2458. INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
  2459. | i;
  2460. intr_context->intr_dis_mask =
  2461. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  2462. INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
  2463. INTR_EN_IHD | i;
  2464. intr_context->intr_read_mask =
  2465. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  2466. INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
  2467. i;
  2468. if (i == 0) {
  2469. /*
  2470. * Default queue handles bcast/mcast plus
  2471. * async events. Needs buffers.
  2472. */
  2473. intr_context->handler = qlge_isr;
  2474. sprintf(intr_context->name, "%s-default-queue",
  2475. qdev->ndev->name);
  2476. } else if (i < qdev->rss_ring_first_cq_id) {
  2477. /*
  2478. * Outbound queue is for outbound completions only.
  2479. */
  2480. intr_context->handler = qlge_msix_tx_isr;
  2481. sprintf(intr_context->name, "%s-tx-%d",
  2482. qdev->ndev->name, i);
  2483. } else {
  2484. /*
  2485. * Inbound queues handle unicast frames only.
  2486. */
  2487. intr_context->handler = qlge_msix_rx_isr;
  2488. sprintf(intr_context->name, "%s-rx-%d",
  2489. qdev->ndev->name, i);
  2490. }
  2491. }
  2492. } else {
  2493. /*
  2494. * All rx_rings use the same intr_context since
  2495. * there is only one vector.
  2496. */
  2497. intr_context->intr = 0;
  2498. intr_context->qdev = qdev;
  2499. /*
2500. * We set up each vector's enable/disable/read bits so
2501. * there are no bit/mask calculations in the critical path.
  2502. */
  2503. intr_context->intr_en_mask =
  2504. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
  2505. intr_context->intr_dis_mask =
  2506. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  2507. INTR_EN_TYPE_DISABLE;
  2508. intr_context->intr_read_mask =
  2509. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
  2510. /*
  2511. * Single interrupt means one handler for all rings.
  2512. */
  2513. intr_context->handler = qlge_isr;
  2514. sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
  2515. for (i = 0; i < qdev->rx_ring_count; i++)
  2516. qdev->rx_ring[i].irq = 0;
  2517. }
  2518. }
  2519. static void ql_free_irq(struct ql_adapter *qdev)
  2520. {
  2521. int i;
  2522. struct intr_context *intr_context = &qdev->intr_context[0];
  2523. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  2524. if (intr_context->hooked) {
  2525. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  2526. free_irq(qdev->msi_x_entry[i].vector,
  2527. &qdev->rx_ring[i]);
  2528. QPRINTK(qdev, IFDOWN, ERR,
  2529. "freeing msix interrupt %d.\n", i);
  2530. } else {
  2531. free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
  2532. QPRINTK(qdev, IFDOWN, ERR,
  2533. "freeing msi interrupt %d.\n", i);
  2534. }
  2535. }
  2536. }
  2537. ql_disable_msix(qdev);
  2538. }
  2539. static int ql_request_irq(struct ql_adapter *qdev)
  2540. {
  2541. int i;
  2542. int status = 0;
  2543. struct pci_dev *pdev = qdev->pdev;
  2544. struct intr_context *intr_context = &qdev->intr_context[0];
  2545. ql_resolve_queues_to_irqs(qdev);
  2546. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  2547. atomic_set(&intr_context->irq_cnt, 0);
  2548. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  2549. status = request_irq(qdev->msi_x_entry[i].vector,
  2550. intr_context->handler,
  2551. 0,
  2552. intr_context->name,
  2553. &qdev->rx_ring[i]);
  2554. if (status) {
  2555. QPRINTK(qdev, IFUP, ERR,
  2556. "Failed request for MSIX interrupt %d.\n",
  2557. i);
  2558. goto err_irq;
  2559. } else {
  2560. QPRINTK(qdev, IFUP, INFO,
  2561. "Hooked intr %d, queue type %s%s%s, with name %s.\n",
  2562. i,
  2563. qdev->rx_ring[i].type ==
  2564. DEFAULT_Q ? "DEFAULT_Q" : "",
  2565. qdev->rx_ring[i].type ==
  2566. TX_Q ? "TX_Q" : "",
  2567. qdev->rx_ring[i].type ==
  2568. RX_Q ? "RX_Q" : "", intr_context->name);
  2569. }
  2570. } else {
  2571. QPRINTK(qdev, IFUP, DEBUG,
  2572. "trying msi or legacy interrupts.\n");
  2573. QPRINTK(qdev, IFUP, DEBUG,
  2574. "%s: irq = %d.\n", __func__, pdev->irq);
  2575. QPRINTK(qdev, IFUP, DEBUG,
  2576. "%s: context->name = %s.\n", __func__,
  2577. intr_context->name);
  2578. QPRINTK(qdev, IFUP, DEBUG,
  2579. "%s: dev_id = 0x%p.\n", __func__,
  2580. &qdev->rx_ring[0]);
  2581. status =
  2582. request_irq(pdev->irq, qlge_isr,
  2583. test_bit(QL_MSI_ENABLED,
  2584. &qdev->
  2585. flags) ? 0 : IRQF_SHARED,
  2586. intr_context->name, &qdev->rx_ring[0]);
  2587. if (status)
  2588. goto err_irq;
  2589. QPRINTK(qdev, IFUP, ERR,
  2590. "Hooked intr %d, queue type %s%s%s, with name %s.\n",
  2591. i,
  2592. qdev->rx_ring[0].type ==
  2593. DEFAULT_Q ? "DEFAULT_Q" : "",
  2594. qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
  2595. qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
  2596. intr_context->name);
  2597. }
  2598. intr_context->hooked = 1;
  2599. }
  2600. return status;
  2601. err_irq:
  2602. QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
  2603. ql_free_irq(qdev);
  2604. return status;
  2605. }
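/* Build and download the RSS init control block (ricb): the base RSS
 * completion queue, the queue mask, an indirection table filled with
 * alternating 0/1 entries, and randomly generated IPv4/IPv6 hash keys.
 */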
  2606. static int ql_start_rss(struct ql_adapter *qdev)
  2607. {
  2608. struct ricb *ricb = &qdev->ricb;
  2609. int status = 0;
  2610. int i;
  2611. u8 *hash_id = (u8 *) ricb->hash_cq_id;
2612. memset((void *)ricb, 0, sizeof(*ricb));
  2613. ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
  2614. ricb->flags =
  2615. (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
  2616. RSS_RT6);
  2617. ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
  2618. /*
  2619. * Fill out the Indirection Table.
  2620. */
  2621. for (i = 0; i < 32; i++)
  2622. hash_id[i] = i & 1;
  2623. /*
  2624. * Random values for the IPv6 and IPv4 Hash Keys.
  2625. */
  2626. get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
  2627. get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
  2628. QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2629. status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
  2630. if (status) {
  2631. QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
  2632. return status;
  2633. }
  2634. QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
  2635. return status;
  2636. }
  2637. /* Initialize the frame-to-queue routing. */
  2638. static int ql_route_initialize(struct ql_adapter *qdev)
  2639. {
  2640. int status = 0;
  2641. int i;
  2642. /* Clear all the entries in the routing table. */
  2643. for (i = 0; i < 16; i++) {
  2644. status = ql_set_routing_reg(qdev, i, 0, 0);
  2645. if (status) {
  2646. QPRINTK(qdev, IFUP, ERR,
  2647. "Failed to init routing register for CAM packets.\n");
  2648. return status;
  2649. }
  2650. }
  2651. status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
  2652. if (status) {
  2653. QPRINTK(qdev, IFUP, ERR,
  2654. "Failed to init routing register for error packets.\n");
  2655. return status;
  2656. }
  2657. status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
  2658. if (status) {
  2659. QPRINTK(qdev, IFUP, ERR,
  2660. "Failed to init routing register for broadcast packets.\n");
  2661. return status;
  2662. }
  2663. /* If we have more than one inbound queue, then turn on RSS in the
  2664. * routing block.
  2665. */
  2666. if (qdev->rss_ring_count > 1) {
  2667. status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
  2668. RT_IDX_RSS_MATCH, 1);
  2669. if (status) {
  2670. QPRINTK(qdev, IFUP, ERR,
  2671. "Failed to init routing register for MATCH RSS packets.\n");
  2672. return status;
  2673. }
  2674. }
  2675. status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
  2676. RT_IDX_CAM_HIT, 1);
  2677. if (status) {
  2678. QPRINTK(qdev, IFUP, ERR,
  2679. "Failed to init routing register for CAM packets.\n");
  2680. return status;
  2681. }
  2682. return status;
  2683. }
  2684. static int ql_adapter_initialize(struct ql_adapter *qdev)
  2685. {
  2686. u32 value, mask;
  2687. int i;
  2688. int status = 0;
  2689. /*
  2690. * Set up the System register to halt on errors.
  2691. */
  2692. value = SYS_EFE | SYS_FAE;
  2693. mask = value << 16;
  2694. ql_write32(qdev, SYS, mask | value);
  2695. /* Set the default queue. */
  2696. value = NIC_RCV_CFG_DFQ;
  2697. mask = NIC_RCV_CFG_DFQ_MASK;
  2698. ql_write32(qdev, NIC_RCV_CFG, (mask | value));
  2699. /* Set the MPI interrupt to enabled. */
  2700. ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
  2701. /* Enable the function, set pagesize, enable error checking. */
  2702. value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
  2703. FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
  2704. /* Set/clear header splitting. */
  2705. mask = FSC_VM_PAGESIZE_MASK |
  2706. FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
  2707. ql_write32(qdev, FSC, mask | value);
  2708. ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
  2709. min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
  2710. /* Start up the rx queues. */
  2711. for (i = 0; i < qdev->rx_ring_count; i++) {
  2712. status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
  2713. if (status) {
  2714. QPRINTK(qdev, IFUP, ERR,
  2715. "Failed to start rx ring[%d].\n", i);
  2716. return status;
  2717. }
  2718. }
  2719. /* If there is more than one inbound completion queue
  2720. * then download a RICB to configure RSS.
  2721. */
  2722. if (qdev->rss_ring_count > 1) {
  2723. status = ql_start_rss(qdev);
  2724. if (status) {
  2725. QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
  2726. return status;
  2727. }
  2728. }
  2729. /* Start up the tx queues. */
  2730. for (i = 0; i < qdev->tx_ring_count; i++) {
  2731. status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
  2732. if (status) {
  2733. QPRINTK(qdev, IFUP, ERR,
  2734. "Failed to start tx ring[%d].\n", i);
  2735. return status;
  2736. }
  2737. }
  2738. status = ql_port_initialize(qdev);
  2739. if (status) {
  2740. QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
  2741. return status;
  2742. }
  2743. status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
  2744. MAC_ADDR_TYPE_CAM_MAC, qdev->func);
  2745. if (status) {
  2746. QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
  2747. return status;
  2748. }
  2749. status = ql_route_initialize(qdev);
  2750. if (status) {
  2751. QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
  2752. return status;
  2753. }
  2754. /* Start NAPI for the RSS queues. */
  2755. for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
  2756. QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
  2757. i);
  2758. napi_enable(&qdev->rx_ring[i].napi);
  2759. }
  2760. return status;
  2761. }
  2762. /* Issue soft reset to chip. */
  2763. static int ql_adapter_reset(struct ql_adapter *qdev)
  2764. {
  2765. u32 value;
  2766. int max_wait_time;
  2767. int status = 0;
  2768. int resetCnt = 0;
  2769. #define MAX_RESET_CNT 1
  2770. issueReset:
  2771. resetCnt++;
  2772. QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
  2773. ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
  2774. /* Wait for reset to complete. */
  2775. max_wait_time = 3;
  2776. QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
  2777. max_wait_time);
  2778. do {
  2779. value = ql_read32(qdev, RST_FO);
  2780. if ((value & RST_FO_FR) == 0)
  2781. break;
  2782. ssleep(1);
  2783. } while ((--max_wait_time));
  2784. if (value & RST_FO_FR) {
  2785. QPRINTK(qdev, IFDOWN, ERR,
  2786. "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
  2787. if (resetCnt < MAX_RESET_CNT)
  2788. goto issueReset;
  2789. }
  2790. if (max_wait_time == 0) {
  2791. status = -ETIMEDOUT;
  2792. QPRINTK(qdev, IFDOWN, ERR,
  2793. "ETIMEOUT!!! errored out of resetting the chip!\n");
  2794. }
  2795. return status;
  2796. }
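/* Editor's sketch (kept under "#if 0" so it is not compiled): a minimal
 * standalone model of the handshake above: write a self-clearing reset bit,
 * then poll it with a bounded wait.  The read_fn callback and the
 * one-second poll interval are assumptions made for the example, not part
 * of the driver.
 */
#if 0
static int example_poll_bit_clear(struct ql_adapter *qdev,
				  u32 (*read_fn)(struct ql_adapter *, u32),
				  u32 reg, u32 bit, int max_wait_seconds)
{
	u32 value;

	do {
		value = read_fn(qdev, reg);
		if ((value & bit) == 0)
			return 0;	/* bit self-cleared: reset finished */
		ssleep(1);		/* back off before polling again */
	} while (--max_wait_seconds);

	return -ETIMEDOUT;		/* bit never cleared within the window */
}
#endif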
  2797. static void ql_display_dev_info(struct net_device *ndev)
  2798. {
2799. struct ql_adapter *qdev = netdev_priv(ndev);
  2800. QPRINTK(qdev, PROBE, INFO,
  2801. "Function #%d, NIC Roll %d, NIC Rev = %d, "
  2802. "XG Roll = %d, XG Rev = %d.\n",
  2803. qdev->func,
  2804. qdev->chip_rev_id & 0x0000000f,
  2805. qdev->chip_rev_id >> 4 & 0x0000000f,
  2806. qdev->chip_rev_id >> 8 & 0x0000000f,
  2807. qdev->chip_rev_id >> 12 & 0x0000000f);
  2808. QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
  2809. }
  2810. static int ql_adapter_down(struct ql_adapter *qdev)
  2811. {
  2812. struct net_device *ndev = qdev->ndev;
  2813. int i, status = 0;
  2814. struct rx_ring *rx_ring;
  2815. netif_stop_queue(ndev);
  2816. netif_carrier_off(ndev);
  2817. cancel_delayed_work_sync(&qdev->asic_reset_work);
  2818. cancel_delayed_work_sync(&qdev->mpi_reset_work);
  2819. cancel_delayed_work_sync(&qdev->mpi_work);
  2820. /* The default queue at index 0 is always processed in
  2821. * a workqueue.
  2822. */
  2823. cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
  2824. /* The rest of the rx_rings are processed in
2825. * a workqueue only in a single-interrupt
2826. * environment (MSI or legacy interrupts).
  2827. */
  2828. for (i = 1; i < qdev->rx_ring_count; i++) {
  2829. rx_ring = &qdev->rx_ring[i];
2830. /* Only the RSS rings use NAPI in a multi-IRQ
  2831. * environment. Outbound completion processing
  2832. * is done in interrupt context.
  2833. */
  2834. if (i >= qdev->rss_ring_first_cq_id) {
  2835. napi_disable(&rx_ring->napi);
  2836. } else {
  2837. cancel_delayed_work_sync(&rx_ring->rx_work);
  2838. }
  2839. }
  2840. clear_bit(QL_ADAPTER_UP, &qdev->flags);
  2841. ql_disable_interrupts(qdev);
  2842. ql_tx_ring_clean(qdev);
  2843. spin_lock(&qdev->hw_lock);
  2844. status = ql_adapter_reset(qdev);
  2845. if (status)
  2846. QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
  2847. qdev->func);
  2848. spin_unlock(&qdev->hw_lock);
  2849. return status;
  2850. }
  2851. static int ql_adapter_up(struct ql_adapter *qdev)
  2852. {
  2853. int err = 0;
  2854. spin_lock(&qdev->hw_lock);
  2855. err = ql_adapter_initialize(qdev);
  2856. if (err) {
2857. QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
  2858. spin_unlock(&qdev->hw_lock);
  2859. goto err_init;
  2860. }
  2861. spin_unlock(&qdev->hw_lock);
  2862. set_bit(QL_ADAPTER_UP, &qdev->flags);
  2863. ql_enable_interrupts(qdev);
  2864. ql_enable_all_completion_interrupts(qdev);
  2865. if ((ql_read32(qdev, STS) & qdev->port_init)) {
  2866. netif_carrier_on(qdev->ndev);
  2867. netif_start_queue(qdev->ndev);
  2868. }
  2869. return 0;
  2870. err_init:
  2871. ql_adapter_reset(qdev);
  2872. return err;
  2873. }
  2874. static int ql_cycle_adapter(struct ql_adapter *qdev)
  2875. {
  2876. int status;
  2877. status = ql_adapter_down(qdev);
  2878. if (status)
  2879. goto error;
  2880. status = ql_adapter_up(qdev);
  2881. if (status)
  2882. goto error;
  2883. return status;
  2884. error:
  2885. QPRINTK(qdev, IFUP, ALERT,
  2886. "Driver up/down cycle failed, closing device\n");
  2887. rtnl_lock();
  2888. dev_close(qdev->ndev);
  2889. rtnl_unlock();
  2890. return status;
  2891. }
  2892. static void ql_release_adapter_resources(struct ql_adapter *qdev)
  2893. {
  2894. ql_free_mem_resources(qdev);
  2895. ql_free_irq(qdev);
  2896. }
  2897. static int ql_get_adapter_resources(struct ql_adapter *qdev)
  2898. {
  2899. int status = 0;
  2900. if (ql_alloc_mem_resources(qdev)) {
  2901. QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
  2902. return -ENOMEM;
  2903. }
  2904. status = ql_request_irq(qdev);
  2905. if (status)
  2906. goto err_irq;
  2907. return status;
  2908. err_irq:
  2909. ql_free_mem_resources(qdev);
  2910. return status;
  2911. }
  2912. static int qlge_close(struct net_device *ndev)
  2913. {
  2914. struct ql_adapter *qdev = netdev_priv(ndev);
  2915. /*
  2916. * Wait for device to recover from a reset.
  2917. * (Rarely happens, but possible.)
  2918. */
  2919. while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
  2920. msleep(1);
  2921. ql_adapter_down(qdev);
  2922. ql_release_adapter_resources(qdev);
  2923. return 0;
  2924. }
  2925. static int ql_configure_rings(struct ql_adapter *qdev)
  2926. {
  2927. int i;
  2928. struct rx_ring *rx_ring;
  2929. struct tx_ring *tx_ring;
  2930. int cpu_cnt = num_online_cpus();
  2931. /*
  2932. * For each processor present we allocate one
  2933. * rx_ring for outbound completions, and one
  2934. * rx_ring for inbound completions. Plus there is
  2935. * always the one default queue. For the CPU
  2936. * counts we end up with the following rx_rings:
  2937. * rx_ring count =
  2938. * one default queue +
  2939. * (CPU count * outbound completion rx_ring) +
  2940. * (CPU count * inbound (RSS) completion rx_ring)
  2941. * To keep it simple we limit the total number of
2942. * queues to < 32, so we cap the CPU count at 8.
  2943. * This limitation can be removed when requested.
  2944. */
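/* Worked example (editor's sketch): with four online CPUs and
 * MAX_CPUS >= 4, cpu_cnt = 4, giving tx_ring_count = 4,
 * rss_ring_count = 4, rss_ring_first_cq_id = 5 and
 * rx_ring_count = 4 + 4 + 1 = 9 completion queues in total.
 */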
  2945. if (cpu_cnt > MAX_CPUS)
  2946. cpu_cnt = MAX_CPUS;
  2947. /*
  2948. * rx_ring[0] is always the default queue.
  2949. */
  2950. /* Allocate outbound completion ring for each CPU. */
  2951. qdev->tx_ring_count = cpu_cnt;
  2952. /* Allocate inbound completion (RSS) ring for each CPU. */
  2953. qdev->rss_ring_count = cpu_cnt;
  2954. /* cq_id for the first inbound ring handler. */
  2955. qdev->rss_ring_first_cq_id = cpu_cnt + 1;
  2956. /*
  2957. * qdev->rx_ring_count:
  2958. * Total number of rx_rings. This includes the one
  2959. * default queue, a number of outbound completion
  2960. * handler rx_rings, and the number of inbound
  2961. * completion handler rx_rings.
  2962. */
  2963. qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
  2964. for (i = 0; i < qdev->tx_ring_count; i++) {
  2965. tx_ring = &qdev->tx_ring[i];
2966. memset(tx_ring, 0, sizeof(*tx_ring));
  2967. tx_ring->qdev = qdev;
  2968. tx_ring->wq_id = i;
  2969. tx_ring->wq_len = qdev->tx_ring_size;
  2970. tx_ring->wq_size =
  2971. tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
  2972. /*
2973. * The completion queue IDs for the tx rings start
2974. * immediately after the default queue ID, which is zero.
  2975. */
  2976. tx_ring->cq_id = i + 1;
  2977. }
  2978. for (i = 0; i < qdev->rx_ring_count; i++) {
  2979. rx_ring = &qdev->rx_ring[i];
2980. memset(rx_ring, 0, sizeof(*rx_ring));
  2981. rx_ring->qdev = qdev;
  2982. rx_ring->cq_id = i;
  2983. rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
  2984. if (i == 0) { /* Default queue at index 0. */
  2985. /*
  2986. * Default queue handles bcast/mcast plus
  2987. * async events. Needs buffers.
  2988. */
  2989. rx_ring->cq_len = qdev->rx_ring_size;
  2990. rx_ring->cq_size =
  2991. rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
  2992. rx_ring->lbq_len = NUM_LARGE_BUFFERS;
  2993. rx_ring->lbq_size =
  2994. rx_ring->lbq_len * sizeof(__le64);
  2995. rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
  2996. rx_ring->sbq_len = NUM_SMALL_BUFFERS;
  2997. rx_ring->sbq_size =
  2998. rx_ring->sbq_len * sizeof(__le64);
  2999. rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
  3000. rx_ring->type = DEFAULT_Q;
  3001. } else if (i < qdev->rss_ring_first_cq_id) {
  3002. /*
  3003. * Outbound queue handles outbound completions only.
  3004. */
  3005. /* outbound cq is same size as tx_ring it services. */
  3006. rx_ring->cq_len = qdev->tx_ring_size;
  3007. rx_ring->cq_size =
  3008. rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
  3009. rx_ring->lbq_len = 0;
  3010. rx_ring->lbq_size = 0;
  3011. rx_ring->lbq_buf_size = 0;
  3012. rx_ring->sbq_len = 0;
  3013. rx_ring->sbq_size = 0;
  3014. rx_ring->sbq_buf_size = 0;
  3015. rx_ring->type = TX_Q;
  3016. } else { /* Inbound completions (RSS) queues */
  3017. /*
  3018. * Inbound queues handle unicast frames only.
  3019. */
  3020. rx_ring->cq_len = qdev->rx_ring_size;
  3021. rx_ring->cq_size =
  3022. rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
  3023. rx_ring->lbq_len = NUM_LARGE_BUFFERS;
  3024. rx_ring->lbq_size =
  3025. rx_ring->lbq_len * sizeof(__le64);
  3026. rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
  3027. rx_ring->sbq_len = NUM_SMALL_BUFFERS;
  3028. rx_ring->sbq_size =
  3029. rx_ring->sbq_len * sizeof(__le64);
  3030. rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
  3031. rx_ring->type = RX_Q;
  3032. }
  3033. }
  3034. return 0;
  3035. }
  3036. static int qlge_open(struct net_device *ndev)
  3037. {
  3038. int err = 0;
  3039. struct ql_adapter *qdev = netdev_priv(ndev);
  3040. err = ql_configure_rings(qdev);
  3041. if (err)
  3042. return err;
  3043. err = ql_get_adapter_resources(qdev);
  3044. if (err)
  3045. goto error_up;
  3046. err = ql_adapter_up(qdev);
  3047. if (err)
  3048. goto error_up;
  3049. return err;
  3050. error_up:
  3051. ql_release_adapter_resources(qdev);
  3052. return err;
  3053. }
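/* Editor's note: only the standard (1500) and jumbo (9000) MTUs are
 * accepted below; any other request is rejected with -EINVAL.
 */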
  3054. static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
  3055. {
  3056. struct ql_adapter *qdev = netdev_priv(ndev);
  3057. if (ndev->mtu == 1500 && new_mtu == 9000) {
  3058. QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
  3059. } else if (ndev->mtu == 9000 && new_mtu == 1500) {
  3060. QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
  3061. } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
  3062. (ndev->mtu == 9000 && new_mtu == 9000)) {
  3063. return 0;
  3064. } else
  3065. return -EINVAL;
  3066. ndev->mtu = new_mtu;
  3067. return 0;
  3068. }
  3069. static struct net_device_stats *qlge_get_stats(struct net_device
  3070. *ndev)
  3071. {
  3072. struct ql_adapter *qdev = netdev_priv(ndev);
  3073. return &qdev->stats;
  3074. }
  3075. static void qlge_set_multicast_list(struct net_device *ndev)
  3076. {
3077. struct ql_adapter *qdev = netdev_priv(ndev);
  3078. struct dev_mc_list *mc_ptr;
  3079. int i;
  3080. spin_lock(&qdev->hw_lock);
  3081. /*
  3082. * Set or clear promiscuous mode if a
  3083. * transition is taking place.
  3084. */
  3085. if (ndev->flags & IFF_PROMISC) {
  3086. if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
  3087. if (ql_set_routing_reg
  3088. (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
  3089. QPRINTK(qdev, HW, ERR,
  3090. "Failed to set promiscous mode.\n");
  3091. } else {
  3092. set_bit(QL_PROMISCUOUS, &qdev->flags);
  3093. }
  3094. }
  3095. } else {
  3096. if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
  3097. if (ql_set_routing_reg
  3098. (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
  3099. QPRINTK(qdev, HW, ERR,
  3100. "Failed to clear promiscous mode.\n");
  3101. } else {
  3102. clear_bit(QL_PROMISCUOUS, &qdev->flags);
  3103. }
  3104. }
  3105. }
  3106. /*
  3107. * Set or clear all multicast mode if a
  3108. * transition is taking place.
  3109. */
  3110. if ((ndev->flags & IFF_ALLMULTI) ||
  3111. (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
  3112. if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
  3113. if (ql_set_routing_reg
  3114. (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
  3115. QPRINTK(qdev, HW, ERR,
  3116. "Failed to set all-multi mode.\n");
  3117. } else {
  3118. set_bit(QL_ALLMULTI, &qdev->flags);
  3119. }
  3120. }
  3121. } else {
  3122. if (test_bit(QL_ALLMULTI, &qdev->flags)) {
  3123. if (ql_set_routing_reg
  3124. (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
  3125. QPRINTK(qdev, HW, ERR,
  3126. "Failed to clear all-multi mode.\n");
  3127. } else {
  3128. clear_bit(QL_ALLMULTI, &qdev->flags);
  3129. }
  3130. }
  3131. }
  3132. if (ndev->mc_count) {
  3133. for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
  3134. i++, mc_ptr = mc_ptr->next)
  3135. if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
  3136. MAC_ADDR_TYPE_MULTI_MAC, i)) {
  3137. QPRINTK(qdev, HW, ERR,
  3138. "Failed to loadmulticast address.\n");
  3139. goto exit;
  3140. }
  3141. if (ql_set_routing_reg
  3142. (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
  3143. QPRINTK(qdev, HW, ERR,
  3144. "Failed to set multicast match mode.\n");
  3145. } else {
  3146. set_bit(QL_ALLMULTI, &qdev->flags);
  3147. }
  3148. }
  3149. exit:
  3150. spin_unlock(&qdev->hw_lock);
  3151. }
  3152. static int qlge_set_mac_address(struct net_device *ndev, void *p)
  3153. {
3154. struct ql_adapter *qdev = netdev_priv(ndev);
  3155. struct sockaddr *addr = p;
  3156. int ret = 0;
  3157. if (netif_running(ndev))
  3158. return -EBUSY;
  3159. if (!is_valid_ether_addr(addr->sa_data))
  3160. return -EADDRNOTAVAIL;
  3161. memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
  3162. spin_lock(&qdev->hw_lock);
  3163. if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
  3164. MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
  3165. QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
  3166. ret = -1;
  3167. }
  3168. spin_unlock(&qdev->hw_lock);
  3169. return ret;
  3170. }
  3171. static void qlge_tx_timeout(struct net_device *ndev)
  3172. {
3173. struct ql_adapter *qdev = netdev_priv(ndev);
  3174. queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
  3175. }
  3176. static void ql_asic_reset_work(struct work_struct *work)
  3177. {
  3178. struct ql_adapter *qdev =
  3179. container_of(work, struct ql_adapter, asic_reset_work.work);
  3180. ql_cycle_adapter(qdev);
  3181. }
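/* Editor's note: ql_get_board_info() below uses the function number read
 * from the STS register to select the per-function XGMAC semaphore mask,
 * link-up/port-init status bits and MPI mailbox addresses.
 */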
  3182. static void ql_get_board_info(struct ql_adapter *qdev)
  3183. {
  3184. qdev->func =
  3185. (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
  3186. if (qdev->func) {
  3187. qdev->xg_sem_mask = SEM_XGMAC1_MASK;
  3188. qdev->port_link_up = STS_PL1;
  3189. qdev->port_init = STS_PI1;
  3190. qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
  3191. qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
  3192. } else {
  3193. qdev->xg_sem_mask = SEM_XGMAC0_MASK;
  3194. qdev->port_link_up = STS_PL0;
  3195. qdev->port_init = STS_PI0;
  3196. qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
  3197. qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
  3198. }
  3199. qdev->chip_rev_id = ql_read32(qdev, REV_ID);
  3200. }
  3201. static void ql_release_all(struct pci_dev *pdev)
  3202. {
  3203. struct net_device *ndev = pci_get_drvdata(pdev);
  3204. struct ql_adapter *qdev = netdev_priv(ndev);
  3205. if (qdev->workqueue) {
  3206. destroy_workqueue(qdev->workqueue);
  3207. qdev->workqueue = NULL;
  3208. }
  3209. if (qdev->q_workqueue) {
  3210. destroy_workqueue(qdev->q_workqueue);
  3211. qdev->q_workqueue = NULL;
  3212. }
  3213. if (qdev->reg_base)
  3214. iounmap(qdev->reg_base);
  3215. if (qdev->doorbell_area)
  3216. iounmap(qdev->doorbell_area);
  3217. pci_release_regions(pdev);
  3218. pci_set_drvdata(pdev, NULL);
  3219. }
  3220. static int __devinit ql_init_device(struct pci_dev *pdev,
  3221. struct net_device *ndev, int cards_found)
  3222. {
  3223. struct ql_adapter *qdev = netdev_priv(ndev);
  3224. int pos, err = 0;
  3225. u16 val16;
3226. memset(qdev, 0, sizeof(*qdev));
  3227. err = pci_enable_device(pdev);
  3228. if (err) {
  3229. dev_err(&pdev->dev, "PCI device enable failed.\n");
  3230. return err;
  3231. }
  3232. pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  3233. if (pos <= 0) {
  3234. dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
  3235. "aborting.\n");
3236. err = -ENODEV; goto err_out;
  3237. } else {
  3238. pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
  3239. val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
  3240. val16 |= (PCI_EXP_DEVCTL_CERE |
  3241. PCI_EXP_DEVCTL_NFERE |
  3242. PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
  3243. pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
  3244. }
  3245. err = pci_request_regions(pdev, DRV_NAME);
  3246. if (err) {
  3247. dev_err(&pdev->dev, "PCI region request failed.\n");
  3248. goto err_out;
  3249. }
  3250. pci_set_master(pdev);
  3251. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  3252. set_bit(QL_DMA64, &qdev->flags);
  3253. err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  3254. } else {
  3255. err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  3256. if (!err)
  3257. err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  3258. }
  3259. if (err) {
  3260. dev_err(&pdev->dev, "No usable DMA configuration.\n");
  3261. goto err_out;
  3262. }
  3263. pci_set_drvdata(pdev, ndev);
  3264. qdev->reg_base =
  3265. ioremap_nocache(pci_resource_start(pdev, 1),
  3266. pci_resource_len(pdev, 1));
  3267. if (!qdev->reg_base) {
  3268. dev_err(&pdev->dev, "Register mapping failed.\n");
  3269. err = -ENOMEM;
  3270. goto err_out;
  3271. }
  3272. qdev->doorbell_area_size = pci_resource_len(pdev, 3);
  3273. qdev->doorbell_area =
  3274. ioremap_nocache(pci_resource_start(pdev, 3),
  3275. pci_resource_len(pdev, 3));
  3276. if (!qdev->doorbell_area) {
  3277. dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
  3278. err = -ENOMEM;
  3279. goto err_out;
  3280. }
  3281. ql_get_board_info(qdev);
  3282. qdev->ndev = ndev;
  3283. qdev->pdev = pdev;
  3284. qdev->msg_enable = netif_msg_init(debug, default_msg);
  3285. spin_lock_init(&qdev->hw_lock);
  3286. spin_lock_init(&qdev->stats_lock);
  3287. /* make sure the EEPROM is good */
  3288. err = ql_get_flash_params(qdev);
  3289. if (err) {
  3290. dev_err(&pdev->dev, "Invalid FLASH.\n");
  3291. goto err_out;
  3292. }
3293. if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
3294. err = -EIO; goto err_out; }
  3295. memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
  3296. memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
  3297. /* Set up the default ring sizes. */
  3298. qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
  3299. qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
  3300. /* Set up the coalescing parameters. */
  3301. qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
  3302. qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
  3303. qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
  3304. qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
  3305. /*
  3306. * Set up the operating parameters.
  3307. */
  3308. qdev->rx_csum = 1;
  3309. qdev->q_workqueue = create_workqueue(ndev->name);
  3310. qdev->workqueue = create_singlethread_workqueue(ndev->name);
  3311. INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
  3312. INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
  3313. INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
  3314. if (!cards_found) {
  3315. dev_info(&pdev->dev, "%s\n", DRV_STRING);
  3316. dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
  3317. DRV_NAME, DRV_VERSION);
  3318. }
  3319. return 0;
  3320. err_out:
  3321. ql_release_all(pdev);
  3322. pci_disable_device(pdev);
  3323. return err;
  3324. }
  3325. static const struct net_device_ops qlge_netdev_ops = {
  3326. .ndo_open = qlge_open,
  3327. .ndo_stop = qlge_close,
  3328. .ndo_start_xmit = qlge_send,
  3329. .ndo_change_mtu = qlge_change_mtu,
  3330. .ndo_get_stats = qlge_get_stats,
  3331. .ndo_set_multicast_list = qlge_set_multicast_list,
  3332. .ndo_set_mac_address = qlge_set_mac_address,
  3333. .ndo_validate_addr = eth_validate_addr,
  3334. .ndo_tx_timeout = qlge_tx_timeout,
  3335. .ndo_vlan_rx_register = ql_vlan_rx_register,
  3336. .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
  3337. .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
  3338. };
  3339. static int __devinit qlge_probe(struct pci_dev *pdev,
  3340. const struct pci_device_id *pci_entry)
  3341. {
  3342. struct net_device *ndev = NULL;
  3343. struct ql_adapter *qdev = NULL;
  3344. static int cards_found = 0;
  3345. int err = 0;
  3346. ndev = alloc_etherdev(sizeof(struct ql_adapter));
  3347. if (!ndev)
  3348. return -ENOMEM;
  3349. err = ql_init_device(pdev, ndev, cards_found);
  3350. if (err < 0) {
  3351. free_netdev(ndev);
  3352. return err;
  3353. }
  3354. qdev = netdev_priv(ndev);
  3355. SET_NETDEV_DEV(ndev, &pdev->dev);
  3356. ndev->features = (0
  3357. | NETIF_F_IP_CSUM
  3358. | NETIF_F_SG
  3359. | NETIF_F_TSO
  3360. | NETIF_F_TSO6
  3361. | NETIF_F_TSO_ECN
  3362. | NETIF_F_HW_VLAN_TX
  3363. | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
  3364. if (test_bit(QL_DMA64, &qdev->flags))
  3365. ndev->features |= NETIF_F_HIGHDMA;
  3366. /*
  3367. * Set up net_device structure.
  3368. */
  3369. ndev->tx_queue_len = qdev->tx_ring_size;
  3370. ndev->irq = pdev->irq;
  3371. ndev->netdev_ops = &qlge_netdev_ops;
  3372. SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
  3373. ndev->watchdog_timeo = 10 * HZ;
  3374. err = register_netdev(ndev);
  3375. if (err) {
  3376. dev_err(&pdev->dev, "net device registration failed.\n");
  3377. ql_release_all(pdev);
  3378. pci_disable_device(pdev);
  3379. return err;
  3380. }
  3381. netif_carrier_off(ndev);
  3382. netif_stop_queue(ndev);
  3383. ql_display_dev_info(ndev);
  3384. cards_found++;
  3385. return 0;
  3386. }
  3387. static void __devexit qlge_remove(struct pci_dev *pdev)
  3388. {
  3389. struct net_device *ndev = pci_get_drvdata(pdev);
  3390. unregister_netdev(ndev);
  3391. ql_release_all(pdev);
  3392. pci_disable_device(pdev);
  3393. free_netdev(ndev);
  3394. }
  3395. /*
  3396. * This callback is called by the PCI subsystem whenever
  3397. * a PCI bus error is detected.
  3398. */
  3399. static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
  3400. enum pci_channel_state state)
  3401. {
  3402. struct net_device *ndev = pci_get_drvdata(pdev);
  3403. struct ql_adapter *qdev = netdev_priv(ndev);
  3404. if (netif_running(ndev))
  3405. ql_adapter_down(qdev);
  3406. pci_disable_device(pdev);
  3407. /* Request a slot reset. */
  3408. return PCI_ERS_RESULT_NEED_RESET;
  3409. }
  3410. /*
3411. * This callback is called after the PCI bus has been reset.
3412. * Basically, this tries to restart the card from scratch.
3413. * This is a shortened version of the device probe/discovery code;
3414. * it resembles the first half of the () routine.
  3415. */
  3416. static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
  3417. {
  3418. struct net_device *ndev = pci_get_drvdata(pdev);
  3419. struct ql_adapter *qdev = netdev_priv(ndev);
  3420. if (pci_enable_device(pdev)) {
  3421. QPRINTK(qdev, IFUP, ERR,
  3422. "Cannot re-enable PCI device after reset.\n");
  3423. return PCI_ERS_RESULT_DISCONNECT;
  3424. }
  3425. pci_set_master(pdev);
  3426. netif_carrier_off(ndev);
  3427. netif_stop_queue(ndev);
  3428. ql_adapter_reset(qdev);
  3429. /* Make sure the EEPROM is good */
  3430. memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
  3431. if (!is_valid_ether_addr(ndev->perm_addr)) {
  3432. QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
  3433. return PCI_ERS_RESULT_DISCONNECT;
  3434. }
  3435. return PCI_ERS_RESULT_RECOVERED;
  3436. }
  3437. static void qlge_io_resume(struct pci_dev *pdev)
  3438. {
  3439. struct net_device *ndev = pci_get_drvdata(pdev);
  3440. struct ql_adapter *qdev = netdev_priv(ndev);
  3441. pci_set_master(pdev);
  3442. if (netif_running(ndev)) {
  3443. if (ql_adapter_up(qdev)) {
  3444. QPRINTK(qdev, IFUP, ERR,
  3445. "Device initialization failed after reset.\n");
  3446. return;
  3447. }
  3448. }
  3449. netif_device_attach(ndev);
  3450. }
  3451. static struct pci_error_handlers qlge_err_handler = {
  3452. .error_detected = qlge_io_error_detected,
  3453. .slot_reset = qlge_io_slot_reset,
  3454. .resume = qlge_io_resume,
  3455. };
  3456. static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
  3457. {
  3458. struct net_device *ndev = pci_get_drvdata(pdev);
  3459. struct ql_adapter *qdev = netdev_priv(ndev);
  3460. int err;
  3461. netif_device_detach(ndev);
  3462. if (netif_running(ndev)) {
  3463. err = ql_adapter_down(qdev);
3464. if (err)
  3465. return err;
  3466. }
  3467. err = pci_save_state(pdev);
  3468. if (err)
  3469. return err;
  3470. pci_disable_device(pdev);
  3471. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  3472. return 0;
  3473. }
  3474. #ifdef CONFIG_PM
  3475. static int qlge_resume(struct pci_dev *pdev)
  3476. {
  3477. struct net_device *ndev = pci_get_drvdata(pdev);
  3478. struct ql_adapter *qdev = netdev_priv(ndev);
  3479. int err;
  3480. pci_set_power_state(pdev, PCI_D0);
  3481. pci_restore_state(pdev);
  3482. err = pci_enable_device(pdev);
  3483. if (err) {
  3484. QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
  3485. return err;
  3486. }
  3487. pci_set_master(pdev);
  3488. pci_enable_wake(pdev, PCI_D3hot, 0);
  3489. pci_enable_wake(pdev, PCI_D3cold, 0);
  3490. if (netif_running(ndev)) {
  3491. err = ql_adapter_up(qdev);
  3492. if (err)
  3493. return err;
  3494. }
  3495. netif_device_attach(ndev);
  3496. return 0;
  3497. }
  3498. #endif /* CONFIG_PM */
  3499. static void qlge_shutdown(struct pci_dev *pdev)
  3500. {
  3501. qlge_suspend(pdev, PMSG_SUSPEND);
  3502. }
  3503. static struct pci_driver qlge_driver = {
  3504. .name = DRV_NAME,
  3505. .id_table = qlge_pci_tbl,
  3506. .probe = qlge_probe,
  3507. .remove = __devexit_p(qlge_remove),
  3508. #ifdef CONFIG_PM
  3509. .suspend = qlge_suspend,
  3510. .resume = qlge_resume,
  3511. #endif
  3512. .shutdown = qlge_shutdown,
  3513. .err_handler = &qlge_err_handler
  3514. };
  3515. static int __init qlge_init_module(void)
  3516. {
  3517. return pci_register_driver(&qlge_driver);
  3518. }
  3519. static void __exit qlge_exit(void)
  3520. {
  3521. pci_unregister_driver(&qlge_driver);
  3522. }
  3523. module_init(qlge_init_module);
  3524. module_exit(qlge_exit);