/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
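
/* Most of the state-changing functions below open with this assertion:
 * once the interface has reached STATE_RUNNING, resets are serialised
 * by the RTNL lock (see the reset_workqueue comment above), so holding
 * the RTNL is what guarantees that the state these functions touch
 * cannot change underneath them; before that point no lock is asserted.
 */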
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		struct efx_nic *efx = channel->efx;

		if (channel->used_flags & EFX_USED_BY_RX &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			unsigned old_irq_moderation = channel->irq_moderation;

			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				channel->irq_moderation =
					max_t(int,
					      channel->irq_moderation -
					      FALCON_IRQ_MOD_RESOLUTION,
					      FALCON_IRQ_MOD_RESOLUTION);
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				channel->irq_moderation =
					min(channel->irq_moderation +
					    FALCON_IRQ_MOD_RESOLUTION,
					    efx->irq_rx_moderation);
			}

			if (channel->irq_moderation != old_irq_moderation)
				falcon_set_int_moderation(channel);

			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
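
/* To make the adaptive step above concrete: every 1000th interrupt the
 * accumulated irq_mod_score is compared against the two threshold module
 * parameters. With the defaults (10000/20000), an average score below 10
 * per interrupt lowers the moderation delay by one
 * FALCON_IRQ_MOD_RESOLUTION step and an average above 20 raises it,
 * capped at the configured irq_rx_moderation. The score itself is
 * accumulated in the event-processing path, outside this file.
 */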
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, EFX_EVQ_SIZE);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}
/* Channels are shut down and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
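
/* Roughly, the buffer sizing above works out as follows (the expansion
 * of EFX_MAX_FRAME_LEN is defined elsewhere and assumed here): the
 * buffer must hold a whole Ethernet frame for the current MTU plus
 * IP-header alignment padding and the NIC-type-specific RX padding,
 * and get_order() then rounds the allocation up to a power-of-two
 * number of pages, so jumbo MTUs (up to EFX_MAX_MTU) imply
 * higher-order allocations.
 */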
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; the carrier state in turn
 * controls whether the kernel will transmit on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 efx->link_speed, efx->link_fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
static void efx_fini_port(struct efx_nic *efx);

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->port_enabled = false;
	efx_fini_port(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx_stats_enable(efx);
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
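
/* A worked example of the counting loop above (the topology is assumed
 * purely for illustration): on a machine with two packages of four
 * cores each, the first CPU visited in each package adds that package's
 * entire topology_core_cpumask() to core_mask, the remaining CPUs in
 * the package are then skipped, and the function returns 2.
 */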
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
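
/* A note on the MSI-X retry above: when the request is too large,
 * pci_enable_msix() (as used on this kernel) returns a positive count
 * of the vectors that could be allocated, so the code shrinks
 * wanted_ints to that count and asks again before falling back to MSI
 * and, failing that, to a legacy interrupt.
 */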
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels-1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the NIC and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose PHY events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_deconfigure_mac_wrapper(efx);
	msleep(10); /* Let the Rx FIFO drain */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_usecs;
	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
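
/* The values fed in here at probe time come from the tx_irq_mod_usec
 * and rx_irq_mod_usec module parameters; as their comments note, they
 * can be changed at runtime through ethtool's coalescing interface,
 * e.g. (hypothetical interface name):
 *
 *	# ethtool -C eth0 rx-usecs 60 tx-usecs 150 adaptive-rx on
 */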
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = efx->board_info.monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
	return 0;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (channel->napi_dev)
			netif_napi_del(&channel->napi_str);
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	++efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

void efx_stats_enable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	--efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them or if MAC stats fetches are temporarily
	 * disabled; slightly stale stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (!efx->stats_disable_count) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
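/* Worked example (illustrative; the concrete EFX_MCAST_HASH_ENTRIES
 * value lives in the hardware definitions and is assumed here to be
 * a power of two, e.g. 256): an address whose little-endian CRC-32
 * is 0x1a2b3c4d selects bit
 *
 *	0x1a2b3c4d & (256 - 1) == 0x4d
 *
 * of the hash table. All multicast addresses sharing those low CRC
 * bits alias to the same bit, so the hardware filter admits a
 * superset of the subscribed groups; exact filtering is left to the
 * network stack.
 */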
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list	= efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
/* Read-only: there is no store method, so do not advertise 0644 */
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
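/* The attribute appears under the PCI device's sysfs directory; for
 * example (path illustrative, bus address assumed):
 *
 *	cat /sys/bus/pci/devices/0000:01:00.0/phy_type
 *
 * which invokes show_phy_type() above and prints the numeric PHY
 * type. It is created in efx_register_netdev() below.
 */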
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	EFX_ERR(efx, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This must happen before we
	 * try to unregister the netdev, as running their destructors
	 * may be needed to get the device ref count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stats_disable(efx);
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	efx->phy_op->get_settings(efx, ecmd);

	efx_fini_channels(efx);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
		}
		if (!ok)
			efx->port_initialized = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (efx->phy_op->set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx_stats_enable(efx);
	}
	return rc;
}
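/* efx_reset_down()/efx_reset_up() are deliberately non-static so that
 * other parts of the driver can bracket operations that disturb the
 * hardware. A minimal sketch of the expected pairing (names taken
 * from efx_reset() below):
 *
 *	struct ethtool_cmd ecmd;
 *	int rc;
 *
 *	efx_reset_down(efx, method, &ecmd);
 *	rc = falcon_reset_hw(efx, method);
 *	rc = efx_reset_up(efx, method, &ecmd, rc == 0);
 *
 * Per the contract in the comment above, efx_reset_up() must be
 * called even on failure, since it releases the mac_lock and
 * spi_lock taken in efx_reset_down().
 */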
/* Reset the NIC as transparently as possible. Does not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc = 0;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto out_unlock;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, method, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto out_disable;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
	 * hardware can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		efx_reset_up(efx, method, &ecmd, false);
		rc = -EIO;
	} else {
		rc = efx_reset_up(efx, method, &ecmd, true);
	}

out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}

out_unlock:
	rtnl_unlock();
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(reset_workqueue, &efx->reset_work);
}
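/* efx_schedule_reset() is safe to call from atomic context, which is
 * why paths such as the TX watchdog use it; the actual reset then
 * runs from reset_workqueue in process context. For example,
 * efx_watchdog() above requests a reset with:
 *
 *	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
 *
 * Since RESET_TYPE_TX_WATCHDOG is not one of the cases listed in the
 * switch statement, it is mapped to RESET_TYPE_ALL via the default
 * case before being queued.
 */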
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
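/* Each entry's driver_data carries the per-variant operations table
 * through to probe; efx_pci_probe() below recovers it with a cast:
 *
 *	struct efx_nic_type *type =
 *		(struct efx_nic_type *) entry->driver_data;
 *
 * Supporting a new Falcon variant is therefore a matter of adding a
 * row here plus a corresponding efx_nic_type definition.
 */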
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
};

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.poll		 = efx_port_dummy_op_void,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init		= efx_port_dummy_op_int,
	.init_leds	= efx_port_dummy_op_void,
	.set_id_led	= efx_port_dummy_op_blink,
	.monitor	= efx_port_dummy_op_int,
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};
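/* This is the null-object pattern: efx_init_struct() below installs
 * these dummies before the real MAC/PHY probing runs, so early code
 * can call, say,
 *
 *	efx->phy_op->fini(efx);
 *
 * unconditionally while tearing down a half-initialised device,
 * rather than guarding every operation with a NULL check.
 */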
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	efx->stats_disable_count = 1;
	mutex_init(&efx->mac_lock);
	efx->mac_op = &efx_dummy_mac_operations;
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* As close as we can get to guaranteeing that we don't overflow */
	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
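	/* Worked example: with the numbering from the module parameter
	 * (0=>MSI-X, 1=>MSI, 2=>legacy), a NIC type whose best supported
	 * mode is MSI (1) combined with a user request for MSI-X (0)
	 * yields max(1, 0) == 1, i.e. the request is clamped to MSI,
	 * the less capable of the two choices. */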
	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		return -ENOMEM;

	return 0;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	falcon_fini_interrupt(efx);
	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
	efx->board_info.fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);
	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_devices have been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

	efx_init_channels(efx);

	rc = falcon_init_interrupt(efx);
	if (rc)
		goto fail6;

	return 0;

fail6:
	efx_fini_channels(efx);
	efx_fini_port(efx);
fail5:
fail4:
	efx->board_info.fini(efx);
fail3:
	efx_fini_napi(efx);
fail2:
	efx_remove_all(efx);
fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_GRO);
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
		cancel_work_sync(&efx->reset_work);

		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}

		/* Retry only if a recoverable reset event has been
		 * scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}

	/* Switch to the running state before we expose the device to
	 * the OS. This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;

	efx_mtd_probe(efx); /* allowed to fail */

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");
	return 0;

fail5:
	efx_pci_remove_main(efx);
fail4:
fail3:
	efx_fini_io(efx);
fail2:
	efx_fini_struct(efx);
fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

err_pci:
	destroy_workqueue(reset_workqueue);
err_reset:
	destroy_workqueue(refill_workqueue);
err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);