efx.c

/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH]	= "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH]	= "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
};

#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor. On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
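
/* Worked example of the figure above (a sketch, not part of the original
 * source): the queue is woken half way back to empty, i.e. after 512 of
 * 1024 descriptors drain.  At the worst case of 3 descriptors per packet
 * that is ~170 packets, and a full-size frame takes ~1.2 usec on the wire
 * at 10Gb/s, so draining takes 512 / 3 * 1.2 ~= 205 usec; the 150 usec
 * default keeps the completion interrupt ahead of that deadline.
 */
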
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		/* Deliver last RX packet. */
		if (channel->rx_pkt) {
			__efx_rx_packet(channel, channel->rx_pkt);
			channel->rx_pkt = NULL;
		}
		if (rx_queue->enabled) {
			efx_rx_strategy(channel);
			efx_fast_push_rx_descriptors(rx_queue);
		}
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
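
/* Worked example of the sizing above (a sketch with hypothetical ring
 * sizes, not part of the original source): with 512-entry RX and TX
 * rings, entries = roundup_pow_of_two(512 + 512 + 128) = 2048, so the
 * event queue cannot overflow even if every outstanding descriptor
 * completes at once, with headroom left for link and MCDI events.
 */
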
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
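
/* Example of the naming scheme above (a sketch, assuming an interface
 * named "eth0" with tx_channel_offset == 4): channels 0-3 are named
 * "eth0-rx-0" .. "eth0-rx-3" and channels 4-7 become "eth0-tx-0" ..
 * "eth0-tx-3".  With combined channels (offset == 0) the names are
 * simply "eth0-0", "eth0-1", and so on.
 */
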
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shut down and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
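
	/* Worked example (a sketch with hypothetical numbers, not part of
	 * the original source): with txq_entries == 1024 and a worst-case
	 * skb needing, say, 18 descriptors, the queue is stopped once 1006
	 * descriptors are in use and woken again at 503, i.e. half way
	 * back to empty.
	 */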

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			efx_nic_generate_fill_event(rx_queue);
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure. The
			 * descriptor caches reference memory we're about to free,
			 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx_start_interrupts(efx, true);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
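
/* The XOR trick above encodes the standard 802.3 pause resolution (a
 * summary derived from the code, not part of the original source):
 *
 *	wanted_fc		advertised bits
 *	RX and TX		Pause
 *	RX only			Pause | Asym_Pause
 *	TX only			Asym_Pause
 *	neither			(none)
 *
 * efx_link_set_advertising() applies the same mapping in reverse.
 */
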
static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
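	/* For example (a sketch, not part of the original source): with a
	 * 46-bit max_dma_mask the loop tries 46, 45, ... bit masks in turn
	 * and settles on the widest one the platform accepts, reaching the
	 * error path below only if even a 32-bit mask is rejected.
	 */
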
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
	if (rc) {
		/* dma_set_coherent_mask() is not *allowed* to
		 * fail with a mask that dma_set_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}
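
/* Worked example (a sketch, not part of the original source): on a
 * 4-core, 8-thread machine with rss_cpus == 0, the first online thread
 * of each core bumps the count and then masks out its hyperthread
 * siblings via topology_thread_cpumask(), so this returns 4: one RSS
 * channel per physical core rather than per logical CPU.
 */
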
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}
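
/* Worked example (a sketch with hypothetical inputs, not part of the
 * original source): with four wanted RSS channels, separate_tx_channels
 * set and one extra channel type registered, the MSI-X path requests
 * 4 * 2 + 1 = 9 vectors.  If the platform grants them all, this leaves
 * n_channels == 9, n_tx_channels == 4 and n_rx_channels == 4, with the
 * highest-numbered channel handed to the extra channel type.
 */
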

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);
}

static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}
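
/* Worked example (illustrative values, not from the original source):
 * with separate_tx_channels set, n_channels == 4 and n_tx_channels == 1,
 * tx_channel_offset becomes 3, so the TX queue numbers belonging to the
 * last channel are shifted down by 3 * EFX_TXQ_TYPES, giving the kernel
 * a contiguous 0-based range of TX queues.
 */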

static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

fail4:
	efx_remove_filters(efx);
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}
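
/* Note: the fail labels above unwind only what was successfully built
 * before the failing step, in reverse probe order: filters, then the
 * port, then the NIC.  A failure in efx_probe_channels() itself leaves
 * no channels to remove, which is why there is no channel teardown here.
 */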

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev))
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor and event self-test are stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}
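
/* Worked example (hypothetical quantum, not from the original source):
 * with quantum_ns == 6144, irq_mod_ticks(3, 6144) computes
 * 3 * 1000 == 3000 < 6144 and returns 1 rather than rounding down to 0,
 * while irq_mod_ticks(20, 6144) returns 20000 / 6144 == 3 ticks.
 */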

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}
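
/* efx_init_irq_moderation() above is normally reached through the
 * driver's ethtool coalescing hooks; an invocation along the lines of
 * "ethtool -C eth0 rx-usecs 60 adaptive-rx on" (illustrative) ends up
 * here with rx_usecs == 60 and rx_adaptive == true.
 */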

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in progress, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
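
/* The XOR above is a bit trick: the old PRTAD/DEVAD encoding is
 * recognised by bit 10 being set with bits 11-15 clear
 * ((phy_id & 0xfc00) == 0x0400), and XORing with
 * (MDIO_PHY_ID_C45 | 0x0400) clears bit 10 while setting the clause-45
 * flag bit that mdio_mii_ioctl() expects.
 */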

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	mutex_lock(&efx->mac_lock);
	/* Reconfigure the MAC before enabling the dma queues so that
	 * the RX buffers don't overflow */
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		set_bit_le(0xff, mc_hash->byte);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}

/* Read-only (0444): there is no store method, so granting write
 * permission would only trigger a sysfs warning. */
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev, because running their
	 * destructors may be needed to get the device ref count to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	unregister_netdev(efx->net_dev);
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	netif_device_detach(efx->net_dev);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	efx->reset_pending &= -(1 << (method + 1));
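	/* Example of the mask arithmetic above (illustrative, not from the
	 * original source): if method == 2 then -(1 << 3) is ...11111000
	 * in two's complement, so pending bits 0-2 (this scope and
	 * everything it covers) are cleared while any wider reset that is
	 * still pending stays set.
	 */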

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
	 * hardware can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc || method == RESET_TYPE_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending = ACCESS_ONCE(efx->reset_pending);

	if (!pending)
		return;

	/* If we're not READY then don't reset. Leave the reset_pending
	 * flags set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_READY)
		return;

	rtnl_lock();
	(void)efx_reset(efx, fls(pending) - 1);
	rtnl_unlock();
}
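
/* The pending-reset protocol: efx_schedule_reset() below sets one bit
 * per requested scope in efx->reset_pending, and efx_reset_work() above
 * consumes the bitmask with fls(pending) - 1, i.e. it always performs
 * the widest (highest-numbered) reset that has been requested.
 */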
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to polled MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}	/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_int,
	.poll		= efx_port_dummy_op_poll,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	efx->type = type;

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_stop_interrupts(efx, false);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as uninitialised, then stop the interface */
	rtnl_lock();
	efx->state = STATE_UNINIT;
	dev_close(efx->net_dev);
	efx_stop_interrupts(efx, false);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	efx_sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net devices have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	pci_set_drvdata(pci_dev, NULL);
	free_netdev(efx->net_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC. VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
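
/* Sketch of the layout parsed below (illustrative summary of the PCI
 * VPD format, not taken from this file): a large-resource tag introduces
 * the read-only section, whose body is a sequence of keyword fields such
 * as "PN" (part number), each prefixed by a header giving the
 * two-character keyword and the field length.
 */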
static void efx_print_product_vpd(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	j = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);

	return 0;

fail5:
	efx_fini_port(efx);
fail4:
	efx->type->fini(efx);
fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	const struct efx_nic_type *type =
		(const struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_print_product_vpd(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);

	/* Serialise against efx_reset(). No more resets will be
	 * scheduled since efx_stop_all() has been called, and we have
	 * not and never have been registered.
	 */
	cancel_work_sync(&efx->reset_work);

	if (rc)
		goto fail3;

	/* If there was a scheduled reset during probe, the NIC is
	 * probably hosed anyway.
	 */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail4;
	}

	/* Switch to the READY state before we expose the device to the OS,
	 * so that dev_open()|efx_start_all() will actually start the device */
	efx->state = STATE_READY;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx_sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	return 0;

fail4:
	efx_pci_remove_main(efx);
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	pci_set_drvdata(pci_dev, NULL);
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		netif_device_detach(efx->net_dev);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
	}

	rtnl_unlock();

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx_start_interrupts(efx, false);

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	efx_fini_sriov();
err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);