/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include <linux/aer.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH]	= "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH]	= "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static bool separate_tx_channels;
module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
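
/* Illustrative note (not part of the original driver): as the comments
 * above say, these moderation defaults can be overridden per interface
 * after module load with ethtool's coalescing controls, e.g. (interface
 * name assumed):
 *
 *	ethtool -C eth0 rx-usecs 60 tx-usecs 150 adaptive-rx on
 */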

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
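
/* Illustrative note (not part of the original driver): since rss_cpus is
 * a read-only module parameter, the RSS spread can only be capped at load
 * time, e.g. (module name "sfc" assumed from the BAR request string below):
 *
 *	modprobe sfc rss_cpus=4
 */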

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
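
/* Illustrative note (not part of the original driver): the "debug"
 * default ORs the NETIF_MSG_* flags above. Assuming the standard kernel
 * bit values (DRV 0x1, PROBE 0x2, LINK 0x4, IFDOWN 0x10, IFUP 0x20,
 * RX_ERR 0x40, TX_ERR 0x80, HW 0x2000) this works out to 0x20f7, so the
 * same set could be requested explicitly with debug=0x20f7.
 */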

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		if (rx_queue->enabled)
			efx_fast_push_rx_descriptors(rx_queue);
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
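
/* Illustrative worked example (not part of the original driver):
 * assuming 1024-entry RX and TX rings (the worst case cited in the
 * tx_irq_mod_usec comment above), the sizing here gives
 * roundup_pow_of_two(1024 + 1024 + 128) = 4096 events, i.e. an
 * eventq_mask of 0xfff.
 */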

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}
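
/* Illustrative note (not part of the original driver): hardware TX queue
 * numbers are laid out per channel in blocks of EFX_TXQ_TYPES by the loop
 * above. Assuming EFX_TXQ_TYPES == 4, channel 0 owns queues 0-3,
 * channel 1 owns queues 4-7, and so on.
 */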

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = false;
		efx->rx_buffer_order = 0;
		if (rx_buf_len <= PAGE_SIZE / 2)
			efx->rx_buffer_truesize = PAGE_SIZE / 2;
		else
			efx->rx_buffer_truesize = PAGE_SIZE;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
			     PAGE_SIZE / 2);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
		efx->rx_buffer_truesize = PAGE_SIZE / 2;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
		efx->rx_buffer_truesize = PAGE_SIZE << efx->rx_buffer_order;
	}

	/* RX filters also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx_filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			efx_nic_generate_fill_event(rx_queue);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure.
			 * The descriptor caches reference memory we're about
			 * to free, but falcon_reconfigure_mac_wrapper() won't
			 * reconnect the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx_start_interrupts(efx, true);
	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
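
/* Illustrative note (not part of the original driver): this path is
 * typically reached when userspace changes the ring sizes through
 * ethtool's set_ringparam hook, e.g. (interface name and sizes assumed):
 *
 *	ethtool -G eth0 rx 2048 tx 1024
 */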

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also stops or wakes
 * the port's TX queue to match the link state.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
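
/* Illustrative note (not part of the original driver): the set/xor logic
 * in efx_link_set_wanted_fc() implements the standard 802.3 pause
 * advertisement encoding. Working the cases through:
 *
 *	wanted_fc	advertised bits
 *	RX | TX		Pause
 *	RX only		Pause | Asym_Pause
 *	TX only		Asym_Pause
 *	none		(neither)
 */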

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
	if (rc) {
		/* dma_set_coherent_mask() is not *allowed* to
		 * fail with a mask that dma_set_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}
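
/* Illustrative worked example (not part of the original driver):
 * assuming a 46-bit max_dma_mask (DMA_BIT_MASK(46) == 0x3fffffffffff),
 * the loop above probes 0x3fffffffffff, 0x1fffffffffff, ... halving the
 * mask each iteration, and stops at the first mask the platform accepts;
 * the loop condition guarantees it never drops below the 32-bit mask
 * 0xffffffff.
 */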

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}
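
/* Illustrative worked example (not part of the original driver): with
 * rss_cpus left at its default of 0, the loop above counts one CPU per
 * hyperthread sibling group, so e.g. an 8-core/16-thread machine
 * (topology assumed) yields a parallelism of 8 rather than 16.
 */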

static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}
  1206. /* Enable interrupts, then probe and start the event queues */
  1207. static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
  1208. {
  1209. struct efx_channel *channel;
  1210. BUG_ON(efx->state == STATE_DISABLED);
  1211. if (efx->legacy_irq)
  1212. efx->legacy_irq_enabled = true;
  1213. efx_nic_enable_interrupts(efx);
  1214. efx_for_each_channel(channel, efx) {
  1215. if (!channel->type->keep_eventq || !may_keep_eventq)
  1216. efx_init_eventq(channel);
  1217. efx_start_eventq(channel);
  1218. }
  1219. efx_mcdi_mode_event(efx);
  1220. }
  1221. static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
  1222. {
  1223. struct efx_channel *channel;
  1224. if (efx->state == STATE_DISABLED)
  1225. return;
  1226. efx_mcdi_mode_poll(efx);
  1227. efx_nic_disable_interrupts(efx);
  1228. if (efx->legacy_irq) {
  1229. synchronize_irq(efx->legacy_irq);
  1230. efx->legacy_irq_enabled = false;
  1231. }
  1232. efx_for_each_channel(channel, efx) {
  1233. if (channel->irq)
  1234. synchronize_irq(channel->irq);
  1235. efx_stop_eventq(channel);
  1236. if (!channel->type->keep_eventq || !may_keep_eventq)
  1237. efx_fini_eventq(channel);
  1238. }
  1239. }
  1240. static void efx_remove_interrupts(struct efx_nic *efx)
  1241. {
  1242. struct efx_channel *channel;
  1243. /* Remove MSI/MSI-X interrupts */
  1244. efx_for_each_channel(channel, efx)
  1245. channel->irq = 0;
  1246. pci_disable_msi(efx->pci_dev);
  1247. pci_disable_msix(efx->pci_dev);
  1248. /* Remove legacy interrupt */
  1249. efx->legacy_irq = 0;
  1250. }
  1251. static void efx_set_channels(struct efx_nic *efx)
  1252. {
  1253. struct efx_channel *channel;
  1254. struct efx_tx_queue *tx_queue;
  1255. efx->tx_channel_offset =
  1256. separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
  1257. /* We need to mark which channels really have RX and TX
  1258. * queues, and adjust the TX queue numbers if we have separate
  1259. * RX-only and TX-only channels.
  1260. */
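	/* Worked example (assuming separate_tx_channels): with
	 * n_channels = 4 and n_tx_channels = 2, tx_channel_offset is 2,
	 * so the TX queues on channels 2-3 are renumbered down by
	 * 2 * EFX_TXQ_TYPES to keep core TX queue numbers starting at 0.
	 */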
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts.
	 */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

fail4:
	efx_remove_filters(efx);
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock.
	 */
	if (efx->port_enabled || !netif_running(efx->net_dev))
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* If link state detection is normally event-driven, we have
	 * to poll now because we could have missed a change
	 */
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point.
 */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor and event self-test are stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
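/* Convert a moderation interval in microseconds to timer ticks,
 * rounding so that a non-zero request never becomes zero. Example
 * (assuming a hypothetical timer quantum of 5000 ns): 20 us maps to
 * 20000 / 5000 = 4 ticks, while 3 us maps to 1 tick rather than 0.
 */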
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev,
			  "Channels are shared. RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway.
	 */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_ioctl(efx, ifr, cmd);

	/* Convert phy_id from older PRTAD/DEVAD format */
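	/* The old format flags clause-45 addresses with bit 10 (0x0400)
	 * set and bits 11-15 clear; the XOR below clears that flag bit
	 * and sets MDIO_PHY_ID_C45 instead, leaving the PRTAD/DEVAD
	 * bits untouched.
	 */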
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running
	 */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
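		/* Each multicast address is hashed to one bit: the low
		 * bits of the little-endian CRC32 of the MAC address
		 * index into the EFX_MCAST_HASH_ENTRIES-bit filter
		 * table -- with 256 entries the mask below is
		 * effectively 0xff.
		 */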
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
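	/* (features & ~data) has a bit set only where a feature is
	 * currently enabled and the new feature mask turns it off.
	 */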
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n", efx->phy_type);
}
/* Read-only attribute (no store method), so 0444 rather than 0644 */
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested. If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}
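	/* The netdev name is still a template such as "eth%d" at this
	 * point; dev_alloc_name() resolves it to the first free
	 * instance before registration.
	 */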
	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	rtnl_lock();
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev, because running their
	 * destructors may be needed to get the device refcount to 0.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);

	rtnl_lock();
	unregister_netdevice(efx->net_dev);
	efx->state = STATE_UNINIT;
	rtnl_unlock();
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
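	/* Example: if method is 2, -(1 << 3) is ...11111000 in two's
	 * complement, so the mask below clears bits 0-2 (this reset and
	 * any narrower-scope requests it subsumed) while keeping any
	 * wider-scope bits pending.
	 */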
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests.
	 */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
static int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev =
		of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));

	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = ACCESS_ONCE(efx->reset_pending);
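	/* fls() gives the 1-based position of the highest set bit, so
	 * this selects the widest-scope reset pending. fls(0) is 0, so
	 * method would be -1 when nothing is pending; the recovery
	 * comparisons then fail harmlessly and the !pending check below
	 * guards the actual reset.
	 */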
	method = fls(pending) - 1;

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now. Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */
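	/* This barrier pairs with the one in efx_register_netdev(): at
	 * least one side must observe the other's store, so either the
	 * probe path sees reset_pending and aborts, or this path sees
	 * STATE_READY and queues the reset work.
	 */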
	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions.
	 */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_int,
	.poll		= efx_port_dummy_op_poll,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
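	/* Mode values are 0 = MSI-X, 1 = MSI, 2 = legacy (see the
	 * interrupt_mode module parameter below), so max() picks the
	 * more restrictive of what the NIC type supports and what the
	 * user asked for.
	 */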
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_stop_interrupts(efx, false);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	dev_close(efx->net_dev);
	efx_stop_interrupts(efx, false);
	rtnl_unlock();

	efx_sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	pci_set_drvdata(pci_dev, NULL);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC. VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_print_product_vpd(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	j = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);

	return 0;

fail5:
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (efx->type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled, except HIGHDMA, which is not a
	 * user-switchable offload and so is masked out of hw_features.
	 */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_print_product_vpd(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);
	if (rc)
		goto fail3;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx_sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_warn(efx, probe, efx->net_dev,
			   "pci_enable_pcie_error_reporting failed (%d)\n", rc);

	return 0;

fail4:
	efx_pci_remove_main(efx);
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	pci_set_drvdata(pci_dev, NULL);
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
	}

	rtnl_unlock();

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx_start_interrupts(efx, false);

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);
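	/* pci_channel_io_perm_failure means the device is gone for
	 * good: report DISCONNECT without touching the hardware.
	 */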
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n",
			  rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static const struct pci_error_handlers efx_err_handlers = {
	.error_detected	= efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
	.err_handler	= &efx_err_handlers,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	efx_fini_sriov();
err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);