/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) + index;
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
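
/* Compare two owords under a mask: true iff @a and @b differ in any
 * bit position where @mask is set.  Used by the register self-test
 * below to check values read back from the hardware.
 */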
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
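
/* Register self-test: for each register in @regs, set and then clear
 * every testable bit (per the register's mask) in isolation, checking
 * the value read back each time, and finally restore the original
 * value.
 */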
int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
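
/* Free a special buffer previously allocated by
 * efx_alloc_special_buffer().  Safe to call if the buffer was never
 * allocated.
 */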
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
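
/* Decide whether the descriptor-push doorbell may be used.  A push is
 * only worthwhile if the queue was empty the last time the completion
 * path looked at it, which it records in empty_read_count (0 means
 * "not empty"); the two counts are compared with the VALID flag
 * masked off.
 */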
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
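
/* Pin the TX descriptor ring in the buffer table and push the queue's
 * configuration (size, event queue, label, checksum offload settings)
 * to the hardware's TX descriptor pointer table.
 */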
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
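
/* Request a hardware flush of the TX queue.  Completion is signalled
 * asynchronously by a TX_DESCQ_FLS_DONE driver event, which
 * efx_poll_flush_events() below picks up.
 */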
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
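
/* Handle the driver-generated events inserted by
 * efx_nic_generate_test_event() and efx_nic_generate_fill_event(),
 * matched by their channel-specific magic codes.
 */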
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
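
/* Process events from a channel's event queue, dispatching on the
 * event code, until the queue is empty or @budget RX packets have
 * been handled.  Returns the number of RX packets processed.
 */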
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & channel->eventq_mask;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}
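
/* Pin the event queue buffer, fill it with "empty" (all-ones) events
 * and push the queue configuration to the hardware.  On Siena-class
 * NICs (>= EFX_REV_SIENA_A0) the timer table entry is also written
 * here.
 */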
void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
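
/* Inject a software-generated test event; when it arrives back,
 * efx_handle_generated_event() counts it in channel->magic_count.
 */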
void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
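
/* Scan channel 0's event queue for TX and RX flush-done driver
 * events, recording the result in each queue's flushed state.  Every
 * event is then cleared, since the queues are about to be destroyed
 * anyway.
 */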
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & channel->eventq_mask;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	 * date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
  1196. /* Handle a legacy interrupt
  1197. * Acknowledges the interrupt and schedule event queue processing.
  1198. */
  1199. static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
  1200. {
  1201. struct efx_nic *efx = dev_id;
  1202. efx_oword_t *int_ker = efx->irq_status.addr;
  1203. irqreturn_t result = IRQ_NONE;
  1204. struct efx_channel *channel;
  1205. efx_dword_t reg;
  1206. u32 queues;
  1207. int syserr;
  1208. /* Could this be ours? If interrupts are disabled then the
  1209. * channel state may not be valid.
  1210. */
  1211. if (!efx->legacy_irq_enabled)
  1212. return result;
  1213. /* Read the ISR which also ACKs the interrupts */
  1214. efx_readd(efx, &reg, FR_BZ_INT_ISR0);
  1215. queues = EFX_EXTRACT_DWORD(reg, 0, 31);
  1216. /* Check to see if we have a serious error condition */
  1217. if (queues & (1U << efx->fatal_irq_level)) {
  1218. syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
  1219. if (unlikely(syserr))
  1220. return efx_nic_fatal_interrupt(efx);
  1221. }
  1222. if (queues != 0) {
  1223. if (EFX_WORKAROUND_15783(efx))
  1224. efx->irq_zero_count = 0;
  1225. /* Schedule processing of any interrupting queues */
  1226. efx_for_each_channel(channel, efx) {
  1227. if (queues & 1)
  1228. efx_schedule_channel(channel);
  1229. queues >>= 1;
  1230. }
  1231. result = IRQ_HANDLED;
  1232. } else if (EFX_WORKAROUND_15783(efx)) {
  1233. efx_qword_t *event;
  1234. /* We can't return IRQ_HANDLED more than once on seeing ISR=0
  1235. * because this might be a shared interrupt. */
  1236. if (efx->irq_zero_count++ == 0)
  1237. result = IRQ_HANDLED;
  1238. /* Ensure we schedule or rearm all event queues */
  1239. efx_for_each_channel(channel, efx) {
  1240. event = efx_event(channel, channel->eventq_read_ptr);
  1241. if (efx_event_present(event))
  1242. efx_schedule_channel(channel);
  1243. else
  1244. efx_nic_eventq_read_ack(channel);
  1245. }
  1246. }

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
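	/* Note: dev_id is the address of the channel pointer held in
	 * efx->channel[], matching the &efx->channel[...] argument passed
	 * to request_irq() in efx_nic_init_interrupt() below; hence the
	 * double dereference.
	 */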
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}

/* Set up the RSS indirection table.
 * This maps from the packet's hash value to an RX queue.
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
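
/* The table contents are set up elsewhere in the driver.  As a sketch
 * (assumed here purely for illustration), a typical default is a simple
 * round-robin spread across the RX queues:
 *
 *	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 *		efx->rx_indir_table[i] = i % efx->n_rx_channels;
 */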

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;
	}

	return 0;

 fail2:
	/* Only free the IRQs that were successfully requested */
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->channel[channel->channel]);
	}
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
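
	/* Both descriptor cache sizes are programmed as an order: the
	 * hardware field holds n where the cache is (8 << n) entries,
	 * which is exactly what the BUILD_BUG_ONs above enforce for
	 * TX_DC_ENTRIES and RX_DC_ENTRIES.
	 */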

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;
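
	/* fatal_irq_level is the interrupt level on which a fatal error
	 * is signalled; efx_legacy_interrupt() and efx_msi_interrupt()
	 * above test against it before calling efx_nic_fatal_interrupt().
	 */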

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};
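
/* A 24-bit offset is comfortably wide enough for every directly mapped
 * register, and two bits of revision suffice since the highest value is
 * REGISTER_REVISION_Z (3).
 */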

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
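
/* As an illustration, REGISTER_BZ(RX_FILTER_CTL) expands to
 * { FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z },
 * i.e. the register's offset together with the first and last hardware
 * revisions in which it is present.
 */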

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
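
/* The order of entries above fixes the layout of the register dump
 * emitted by efx_nic_get_regs() below. */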

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
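
/* Note that REGISTER_TABLE_BB_CZ(name) deliberately emits *two*
 * initialisers: the B-revision and C-onwards variants of a table share
 * an offset and step but may differ in row count, so each revision
 * range gets its own entry.
 */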

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
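
/* The length computed above must equal the number of bytes written by
 * efx_nic_get_regs() below: sizeof(efx_oword_t) per scalar register and
 * min(step, 16) bytes per table row (a row's stride may exceed the
 * width of the register actually read).
 */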

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}