/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for flush completion events once every EFX_FLUSH_INTERVAL ms,
 * giving up after EFX_FLUSH_POLL_COUNT attempts (about one second in
 * total).
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel) \
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel) \
	(0x00010200 + (_channel)->channel)
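/* efx_handle_generated_event() recovers these by recomputing the
 * macro for the receiving channel and comparing against the event's
 * magic field, so the two base values only need to be distinct from
 * each other; the channel number rides in the low bits.
 */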
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
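/* Concretely: if the NIC's DMA write lands one dword at a time, a
 * reader can observe <new low dword, 0xffffffff> mid-update. Testing
 * each dword separately treats that state as "no event yet", whereas
 * a single 64-bit compare against all-ones would report the
 * half-written event as valid. (Note the bitwise | rather than ||,
 * which evaluates both tests without a branch.)
 */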
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
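/* The j loop walks all 128 bits of the oword register image, but only
 * bits set in the per-register mask are exercised, so read-only and
 * reserved bits are left alone. Each testable bit is written both set
 * and clear, read back, and checked through the same masked compare.
 */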
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
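/* Each buffer-table entry describes one 4KB page: the loop strides
 * the DMA address by 4096, and FRF_AZ_BUF_ADR_FBUF takes a page frame
 * number, hence dma_addr >> 12 (2^12 == 4096 == EFX_BUF_SIZE).
 */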
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
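/* Worked example: a 512-entry descriptor ring needs
 * 512 * sizeof(efx_qword_t) == 4096 bytes, i.e. exactly one 4KB
 * buffer and one buffer-table entry; a 4096-entry event queue needs
 * 32KB, i.e. eight consecutive entries starting at buffer->index.
 */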
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
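/* Note the API mix: special buffers use dma_alloc_coherent() with
 * GFP_KERNEL, while these generic buffers use the older
 * pci_alloc_consistent() wrapper, which is dma_alloc_coherent() with
 * GFP_ATOMIC. Zero-fill is correct here because these buffers hold
 * status words, not events, so the all-ones convention doesn't apply.
 */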
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
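/* In other words: pushing a descriptor with the doorbell is only
 * worthwhile when the queue was empty. empty_read_count records the
 * count observed when the queue last drained, tagged with
 * EFX_EMPTY_COUNT_VALID; masking that flag off and XORing with the
 * current write_count yields zero exactly when nothing has been
 * queued since the queue went empty.
 */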
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
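/* Note that txd.entries counts 4KB buffer-table pages rather than
 * descriptors (efx_alloc_special_buffer() sets entries =
 * len / EFX_BUF_SIZE) and is a power of two, so __ffs() yields log2
 * of the page count; this appears to be the compact size encoding
 * the DESCQ_SIZE field expects. The same pattern is used for the RX
 * and event queues below.
 */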
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
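/* The return value feeds the NAPI poll loop: only RX events count
 * against the budget, and returning spent == budget tells the caller
 * that more work may remain, so the event queue should be polled
 * again rather than re-armed. The tx_packets > txq_entries check
 * similarly bounds the time spent on batched TX completions within a
 * single call.
 */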
/* Check whether an event is present in the eventq at the current
 * read pointer. Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);
		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	 * date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupts must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error, dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
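/* The error-rate window above means: up to EFX_MAX_INT_ERRORS - 1
 * fatal interrupts within EFX_INT_ERROR_EXPIRE seconds (an hour) are
 * each answered with a reset; the fifth within the window gives up
 * and disables the NIC instead.
 */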

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
        struct efx_nic *efx = dev_id;
        efx_oword_t *int_ker = efx->irq_status.addr;
        irqreturn_t result = IRQ_NONE;
        struct efx_channel *channel;
        efx_dword_t reg;
        u32 queues;
        int syserr;

        /* Could this be ours?  If interrupts are disabled then the
         * channel state may not be valid.
         */
        if (!efx->legacy_irq_enabled)
                return result;

        /* Read the ISR, which also ACKs the interrupts */
        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);

        /* Check to see if we have a serious error condition */
        if (queues & (1U << efx->fatal_irq_level)) {
                syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
                if (unlikely(syserr))
                        return efx_nic_fatal_interrupt(efx);
        }

        if (queues != 0) {
                if (EFX_WORKAROUND_15783(efx))
                        efx->irq_zero_count = 0;

                /* Schedule processing of any interrupting queues */
                efx_for_each_channel(channel, efx) {
                        if (queues & 1)
                                efx_schedule_channel(channel);
                        queues >>= 1;
                }
                result = IRQ_HANDLED;

        } else if (EFX_WORKAROUND_15783(efx)) {
                efx_qword_t *event;

                /* We can't return IRQ_HANDLED more than once on seeing ISR=0
                 * because this might be a shared interrupt. */
                if (efx->irq_zero_count++ == 0)
                        result = IRQ_HANDLED;

                /* Ensure we schedule or rearm all event queues */
                efx_for_each_channel(channel, efx) {
                        event = efx_event(channel, channel->eventq_read_ptr);
                        if (efx_event_present(event))
                                efx_schedule_channel(channel);
                        else
                                efx_nic_eventq_read_ack(channel);
                }
        }

        if (result == IRQ_HANDLED) {
                efx->last_irq_cpu = raw_smp_processor_id();
                netif_vdbg(efx, intr, efx->net_dev,
                           "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
                           irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
        }

        return result;
}
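
/* Note on the EFX_WORKAROUND_15783 branch above (an observation, not
 * driver code): reading ISR0 can race with a new event arriving, so on
 * ISR == 0 the handler still walks every event queue and either
 * schedules it or re-arms it.  Claiming IRQ_HANDLED only for the first
 * consecutive zero read keeps a shared interrupt line from being
 * flagged as spurious while still covering the lost-interrupt race.
 */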

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
        struct efx_channel *channel = *(struct efx_channel **)dev_id;
        struct efx_nic *efx = channel->efx;
        efx_oword_t *int_ker = efx->irq_status.addr;
        int syserr;

        efx->last_irq_cpu = raw_smp_processor_id();
        netif_vdbg(efx, intr, efx->net_dev,
                   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

        /* Check to see if we have a serious error condition */
        if (channel->channel == efx->fatal_irq_level) {
                syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
                if (unlikely(syserr))
                        return efx_nic_fatal_interrupt(efx);
        }

        /* Schedule processing of the channel */
        efx_schedule_channel(channel);

        return IRQ_HANDLED;
}
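
/* Note on the dev_id cast above (an observation, not driver code):
 * efx_nic_init_interrupt() below registers each MSI handler with
 * &efx->channel[channel->channel] as dev_id.  Assuming efx->channel is
 * an array of channel pointers, dev_id is a struct efx_channel **, so a
 * single dereference recovers the channel.  Registering the slot's
 * address rather than the pointer itself presumably keeps the dev_id
 * stable even if the channel structure is later reallocated.
 */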

/* Set up the RSS indirection table.
 * This maps from the hash value of the packet to the RX queue.
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
        size_t i = 0;
        efx_dword_t dword;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
                return;

        BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
                     FR_BZ_RX_INDIRECTION_TBL_ROWS);

        for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
                EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
                                     efx->rx_indir_table[i]);
                efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
        }
}
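
/* Illustrative sketch (not driver code): one plausible way a caller
 * could fill efx->rx_indir_table before pushing it, spreading hash
 * buckets evenly across the RX channels.  The helper name is
 * hypothetical and the n_rx_channels field is assumed; the hardware
 * only ever sees the table through efx_nic_push_rx_indir_table().
 */
static void efx_example_spread_rx_indir(struct efx_nic *efx)
{
        size_t i;

        /* Round-robin the table entries over the available RX queues */
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
                efx->rx_indir_table[i] = i % efx->n_rx_channels;

        efx_nic_push_rx_indir_table(efx);
}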

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        if (!EFX_INT_MODE_USE_MSI(efx)) {
                irq_handler_t handler;
                if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                        handler = efx_legacy_interrupt;
                else
                        handler = falcon_legacy_interrupt_a1;

                rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
                                 efx->name, efx);
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed to hook legacy IRQ %d\n",
                                  efx->pci_dev->irq);
                        goto fail1;
                }
                return 0;
        }

        /* Hook MSI or MSI-X interrupt */
        efx_for_each_channel(channel, efx) {
                rc = request_irq(channel->irq, efx_msi_interrupt,
                                 IRQF_PROBE_SHARED, /* Not shared */
                                 efx->channel_name[channel->channel],
                                 &efx->channel[channel->channel]);
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed to hook IRQ %d\n", channel->irq);
                        goto fail2;
                }
        }

        return 0;

fail2:
        efx_for_each_channel(channel, efx)
                free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
        return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
        struct efx_channel *channel;
        efx_oword_t reg;

        /* Disable MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        free_irq(channel->irq,
                                 &efx->channel[channel->channel]);
        }

        /* ACK legacy interrupt */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                efx_reado(efx, &reg, FR_BZ_INT_ISR0);
        else
                falcon_irq_ack_a1(efx);

        /* Disable legacy interrupt */
        if (efx->legacy_irq)
                free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
        efx_oword_t altera_build;
        efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
        return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
        efx_oword_t temp;

        /* Set positions of descriptor caches in SRAM. */
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
                             efx->type->tx_dc_base / 8);
        efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
                             efx->type->rx_dc_base / 8);
        efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

        /* Set TX descriptor cache size. */
        BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
        efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

        /* Set RX descriptor cache size.  Set low watermark to size-8, as
         * this allows most efficient prefetching.
         */
        BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
        efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
        efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

        /* Program INT_KER address */
        EFX_POPULATE_OWORD_2(temp,
                             FRF_AZ_NORM_INT_VEC_DIS_KER,
                             EFX_INT_MODE_USE_MSI(efx),
                             FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
        efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

        if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
                /* Use an interrupt level unused by event queues */
                efx->fatal_irq_level = 0x1f;
        else
                /* Use a valid MSI-X vector */
                efx->fatal_irq_level = 0;

        /* Enable all the genuinely fatal interrupts.  (They are still
         * masked by the overall interrupt mask, controlled by
         * falcon_interrupts()).
         *
         * Note: All other fatal interrupts are enabled
         */
        EFX_POPULATE_OWORD_3(temp,
                             FRF_AZ_ILL_ADR_INT_KER_EN, 1,
                             FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
                             FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
        if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
                EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
        EFX_INVERT_OWORD(temp);
        efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

        efx_nic_push_rx_indir_table(efx);

        /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
         * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
         */
        efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
        /* Enable SW_EV to inherit in char driver - assume harmless here */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
        /* Prefetch threshold 2 => fetch when descriptor cache half empty */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
        /* Disable hardware watchdog which can misfire */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
        /* Squash TX of packets of 16 bytes or less */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
        efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                EFX_POPULATE_OWORD_4(temp,
                                     /* Default values */
                                     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
                                     FRF_BZ_TX_PACE_SB_AF, 0xb,
                                     FRF_BZ_TX_PACE_FB_BASE, 0,
                                     /* Allow large pace values in the
                                      * fast bin. */
                                     FRF_BZ_TX_PACE_BIN_TH,
                                     FFE_BZ_TX_PACE_RESERVED);
                efx_writeo(efx, &temp, FR_BZ_TX_PACE);
        }
}
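
/* Worked example for the BUILD_BUG_ON invariants above (an
 * illustration, not driver code): the descriptor cache size registers
 * take an order such that entries == 8 << order, so an order of 1
 * means 16 cached descriptors, 2 means 32, and so on.  With the RX low
 * watermark set to RX_DC_ENTRIES - 8, a 32-entry cache would start
 * prefetching around the point it falls below 24 entries.
 */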

/* Register dump */

#define REGISTER_REVISION_A     1
#define REGISTER_REVISION_B     2
#define REGISTER_REVISION_C     3
#define REGISTER_REVISION_Z     3       /* latest revision */

struct efx_nic_reg {
        u32 offset:24;
        u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {                              \
        FR_ ## min_rev ## max_rev ## _ ## name,                         \
        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
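
/* For reference (an illustration, not driver code), REGISTER_AB(NIC_STAT)
 * expands to
 *      { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B }
 * i.e. the register's offset plus the inclusive [min, max] range of
 * hardware revisions in which it exists and is safe to read.
 */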

static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AZ(ADR_REGION),
        REGISTER_AZ(INT_EN_KER),
        REGISTER_BZ(INT_EN_CHAR),
        REGISTER_AZ(INT_ADR_KER),
        REGISTER_BZ(INT_ADR_CHAR),
        /* INT_ACK_KER is WO */
        /* INT_ISR0 is RC */
        REGISTER_AZ(HW_INIT),
        REGISTER_CZ(USR_EV_CFG),
        REGISTER_AB(EE_SPI_HCMD),
        REGISTER_AB(EE_SPI_HADR),
        REGISTER_AB(EE_SPI_HDATA),
        REGISTER_AB(EE_BASE_PAGE),
        REGISTER_AB(EE_VPD_CFG0),
        /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
        /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
        /* PCIE_CORE_INDIRECT is indirect */
        REGISTER_AB(NIC_STAT),
        REGISTER_AB(GPIO_CTL),
        REGISTER_AB(GLB_CTL),
        /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
        REGISTER_BZ(DP_CTRL),
        REGISTER_AZ(MEM_STAT),
        REGISTER_AZ(CS_DEBUG),
        REGISTER_AZ(ALTERA_BUILD),
        REGISTER_AZ(CSR_SPARE),
        REGISTER_AB(PCIE_SD_CTL0123),
        REGISTER_AB(PCIE_SD_CTL45),
        REGISTER_AB(PCIE_PCS_CTL_STAT),
        /* DEBUG_DATA_OUT is not used */
        /* DRV_EV is WO */
        REGISTER_AZ(EVQ_CTL),
        REGISTER_AZ(EVQ_CNT1),
        REGISTER_AZ(EVQ_CNT2),
        REGISTER_AZ(BUF_TBL_CFG),
        REGISTER_AZ(SRM_RX_DC_CFG),
        REGISTER_AZ(SRM_TX_DC_CFG),
        REGISTER_AZ(SRM_CFG),
        /* BUF_TBL_UPD is WO */
        REGISTER_AZ(SRM_UPD_EVQ),
        REGISTER_AZ(SRAM_PARITY),
        REGISTER_AZ(RX_CFG),
        REGISTER_BZ(RX_FILTER_CTL),
        /* RX_FLUSH_DESCQ is WO */
        REGISTER_AZ(RX_DC_CFG),
        REGISTER_AZ(RX_DC_PF_WM),
        REGISTER_BZ(RX_RSS_TKEY),
        /* RX_NODESC_DROP is RC */
        REGISTER_AA(RX_SELF_RST),
        /* RX_DEBUG, RX_PUSH_DROP are not used */
        REGISTER_CZ(RX_RSS_IPV6_REG1),
        REGISTER_CZ(RX_RSS_IPV6_REG2),
        REGISTER_CZ(RX_RSS_IPV6_REG3),
        /* TX_FLUSH_DESCQ is WO */
        REGISTER_AZ(TX_DC_CFG),
        REGISTER_AA(TX_CHKSM_CFG),
        REGISTER_AZ(TX_CFG),
        /* TX_PUSH_DROP is not used */
        REGISTER_AZ(TX_RESERVED),
        REGISTER_BZ(TX_PACE),
        /* TX_PACE_DROP_QID is RC */
        REGISTER_BB(TX_VLAN),
        REGISTER_BZ(TX_IPFIL_PORTEN),
        REGISTER_AB(MD_TXD),
        REGISTER_AB(MD_RXD),
        REGISTER_AB(MD_CS),
        REGISTER_AB(MD_PHY_ADR),
        REGISTER_AB(MD_ID),
        /* MD_STAT is RC */
        REGISTER_AB(MAC_STAT_DMA),
        REGISTER_AB(MAC_CTRL),
        REGISTER_BB(GEN_MODE),
        REGISTER_AB(MAC_MC_HASH_REG0),
        REGISTER_AB(MAC_MC_HASH_REG1),
        REGISTER_AB(GM_CFG1),
        REGISTER_AB(GM_CFG2),
        /* GM_IPG and GM_HD are not used */
        REGISTER_AB(GM_MAX_FLEN),
        /* GM_TEST is not used */
        REGISTER_AB(GM_ADR1),
        REGISTER_AB(GM_ADR2),
        REGISTER_AB(GMF_CFG0),
        REGISTER_AB(GMF_CFG1),
        REGISTER_AB(GMF_CFG2),
        REGISTER_AB(GMF_CFG3),
        REGISTER_AB(GMF_CFG4),
        REGISTER_AB(GMF_CFG5),
        REGISTER_BB(TX_SRC_MAC_CTL),
        REGISTER_AB(XM_ADR_LO),
        REGISTER_AB(XM_ADR_HI),
        REGISTER_AB(XM_GLB_CFG),
        REGISTER_AB(XM_TX_CFG),
        REGISTER_AB(XM_RX_CFG),
        REGISTER_AB(XM_MGT_INT_MASK),
        REGISTER_AB(XM_FC),
        REGISTER_AB(XM_PAUSE_TIME),
        REGISTER_AB(XM_TX_PARAM),
        REGISTER_AB(XM_RX_PARAM),
        /* XM_MGT_INT_MSK (note no 'A') is RC */
        REGISTER_AB(XX_PWR_RST),
        REGISTER_AB(XX_SD_CTL),
        REGISTER_AB(XX_TXDRV_CTL),
        /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
        /* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
        u32 offset:24;
        u32 min_revision:2, max_revision:2;
        u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
        offset,                                                         \
        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
        step, rows                                                      \
}
#define REGISTER_TABLE(name, min_rev, max_rev)                          \
        REGISTER_TABLE_DIMENSIONS(                                      \
                name, FR_ ## min_rev ## max_rev ## _ ## name,           \
                min_rev, max_rev,                                       \
                FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
                FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)                                      \
        REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_BB_ ## name ## _ROWS),             \
        REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
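
/* For reference (an illustration, not driver code): REGISTER_TABLE_BB_CZ
 * emits two table entries from one name because revisions B and C share
 * a table's offset and step but differ in row count; the B-revision
 * entry uses FR_BB_ ## name ## _ROWS while the C-and-later entry uses
 * FR_CZ_ ## name ## _ROWS.
 */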

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* DRIVER is not used */
        /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
        REGISTER_TABLE_BB(TX_IPFIL_TBL),
        REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
        REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
        REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
        REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
        /* We can't reasonably read all of the buffer table (up to 8MB!).
         * However this driver will only use a few entries.  Reading
         * 1K entries allows for some expansion of queue count and
         * size before we need to change the version. */
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
                                  A, A, 8, 1024),
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
                                  B, Z, 8, 1024),
        REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
        REGISTER_TABLE_BB_CZ(TIMER_TBL),
        REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
        REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
        /* TX_FILTER_TBL0 is huge and not used by this driver */
        REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
        REGISTER_TABLE_CZ(MC_TREG_SMEM),
        /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
        REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
        const struct efx_nic_reg *reg;
        const struct efx_nic_reg_table *table;
        size_t len = 0;

        for (reg = efx_nic_regs;
             reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
             reg++)
                if (efx->type->revision >= reg->min_revision &&
                    efx->type->revision <= reg->max_revision)
                        len += sizeof(efx_oword_t);

        for (table = efx_nic_reg_tables;
             table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
             table++)
                if (efx->type->revision >= table->min_revision &&
                    efx->type->revision <= table->max_revision)
                        len += table->rows * min_t(size_t, table->step, 16);

        return len;
}
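
/* Worked example for the length calculation above (an illustration,
 * not driver code): a plain register always contributes
 * sizeof(efx_oword_t) = 16 bytes.  A table with step 8 and 1024 rows
 * contributes 1024 * 8 = 8192 bytes, while a table with step 32 still
 * contributes only 16 bytes per row, because steps above 16 are
 * clamped: each row holds at most one 128-bit register.
 */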

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
        const struct efx_nic_reg *reg;
        const struct efx_nic_reg_table *table;

        for (reg = efx_nic_regs;
             reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
             reg++) {
                if (efx->type->revision >= reg->min_revision &&
                    efx->type->revision <= reg->max_revision) {
                        efx_reado(efx, (efx_oword_t *)buf, reg->offset);
                        buf += sizeof(efx_oword_t);
                }
        }

        for (table = efx_nic_reg_tables;
             table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
             table++) {
                size_t size, i;

                if (!(efx->type->revision >= table->min_revision &&
                      efx->type->revision <= table->max_revision))
                        continue;

                size = min_t(size_t, table->step, 16);

                if (table->offset >= efx->type->mem_map_size) {
                        /* No longer mapped; return dummy data */
                        memcpy(buf, "\xde\xc0\xad\xde", 4);
                        buf += table->rows * size;
                        continue;
                }

                for (i = 0; i < table->rows; i++) {
                        switch (table->step) {
                        case 4: /* 32-bit register or SRAM */
                                efx_readd_table(efx, buf, table->offset, i);
                                break;
                        case 8: /* 64-bit SRAM */
                                efx_sram_readq(efx,
                                               efx->membase + table->offset,
                                               buf, i);
                                break;
                        case 16: /* 128-bit register */
                                efx_reado_table(efx, buf, table->offset, i);
                                break;
                        case 32: /* 128-bit register, interleaved */
                                efx_reado_table(efx, buf, table->offset, 2 * i);
                                break;
                        default:
                                WARN_ON(1);
                                return;
                        }
                        buf += size;
                }
        }
}
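
/* Illustrative sketch (not driver code): how the two functions above
 * pair up in an ethtool-style register dump.  The helper name and the
 * kmalloc/kfree buffer handling are assumptions for the example; in
 * practice the caller sizes the buffer from efx_nic_get_regs_len()
 * before filling it with efx_nic_get_regs().
 */
static void efx_example_dump_regs(struct efx_nic *efx)
{
        size_t len = efx_nic_get_regs_len(efx);
        void *buf = kmalloc(len, GFP_KERNEL);

        if (!buf)
                return;

        efx_nic_get_regs(efx, buf);
        /* ... hand buf/len to whatever consumes the dump ... */
        kfree(buf);
}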