farch.c
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
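/* Note: the *_ORDER constants satisfy entries == 8 << order
 * (8 << 1 == 16, 8 << 3 == 64); the shifted form appears to be the
 * encoding the descriptor-cache size register fields expect — an
 * inference from the paired defines rather than something this file
 * states.
 */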
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
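/* Worked example of the encoding above: a TEST event for channel 3 is
 * _EFX_CHANNEL_MAGIC(0x000101, 3) == (0x000101 << 8) | 3 == 0x00010103,
 * and _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers 0x000101.  The low
 * 8 bits carry the channel/queue number and the rest identify the event
 * type; efx_farch_handle_generated_event() demultiplexes on both.
 */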
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
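/* The 128-bit values are compared as two u64 halves: XOR exposes the
 * differing bits and the mask keeps only the bits under test.  For
 * example, a->u64[0] == 0xff00, b->u64[0] == 0x0f00 and mask->u64[0] ==
 * 0x00ff give ((a ^ b) & mask) == 0, i.e. equal under that mask.
 */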
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
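/* FRF_AZ_BUF_ADR_FBUF takes a 4K-page number, hence dma_addr >> 12
 * above.  efx_alloc_special_buffer() guarantees 4K alignment (see its
 * BUG_ON on dma_addr), so the shift discards no address bits.
 */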
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
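/* Buffer IDs come from a simple bump allocator (efx->next_buffer_table);
 * nothing hands an ID range back, so the buffer-table layout is
 * effectively fixed once all queues have been probed.  The SRIOV BUG_ON
 * above checks that PF allocations stay below the region reserved for
 * VF buffer-table entries.
 */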
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}
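/* Two doorbell styles are used above: efx_farch_push_tx_desc() writes
 * the first descriptor inline with the write pointer, saving the NIC a
 * descriptor fetch from host memory when the queue was idle, while
 * efx_farch_notify_tx_desc() updates only the write pointer and lets
 * the NIC fetch the descriptors itself.  efx_nic_may_push_tx_desc()
 * decides which is appropriate for this write.
 */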
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
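/* Two details above are easy to miss.  FRF_AZ_TX_DESCQ_SIZE is written
 * as __ffs(txd.entries), where txd.entries counts 4K buffer-table pages
 * backing the ring (one page holds 512 8-byte descriptors), so the
 * field is log2 of the page count: 0/1/2/3 for rings of 512/1K/2K/4K
 * descriptors.  And checksum offload is per-queue in the descriptor
 * pointer table on Falcon B0 and later, but on A1 it lives in a single
 * 128-bit bitmap (FR_AA_TX_CHKSM_CFG) whose bits apparently *disable*
 * checksumming, hence the clear/set pair keyed on EFX_TXQ_TYPE_OFFLOAD.
 */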
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed.  Wait for the DRAIN events to be received so that
 * there are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion).  If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
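/* Flush accounting, in brief: drain_pending counts every TX and RX
 * queue still expected to produce a drain event; rxq_flush_pending
 * counts RX flushes requested but not yet issued; rxq_flush_outstanding
 * counts flushes issued to hardware, capped at EFX_RX_FLUSH_COUNT (4)
 * at a time.  The loop above sleeps until either all drains complete
 * or another RX flush may be kicked off (see efx_farch_flush_wake()).
 */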
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
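/* The completion count above is computed modulo the ring size, so it
 * stays correct across wrap-around: with ptr_mask == 0x3ff (a
 * 1024-entry ring), read_count == 1020 and tx_ev_desc_ptr == 4 give
 * (4 - 1020) & 0x3ff == 8 descriptors completed.
 */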
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order.  Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be
 * used to discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
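/* Scatter assembly, in brief: each RX event describes one descriptor's
 * worth of a packet.  scatter_n counts the fragments accumulated so
 * far; an event with JUMBO_CONT set means more fragments follow, so we
 * return early above, and only when the final fragment arrives is the
 * whole run of scatter_n buffers handed to efx_rx_packet() in one call.
 */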
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
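/* The "present" check above relies on the queue being initialised to
 * all-ones (see efx_farch_ev_init()) and on each consumed event being
 * overwritten with all-ones by EFX_SET_QWORD(); an all-ones slot is
 * never a valid event, so it marks the end of what the NIC has written
 * so far.
 */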
/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
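
/* A minimal sketch (not built into the driver) of the EVQ_SIZE encoding
 * pushed above: the register field holds log2 of the entry count, which
 * is what __ffs() returns when, as the driver guarantees, the count is a
 * power of two.
 */
#if 0
static unsigned int evq_size_field(unsigned int entries)
{
	return __ffs(entries);	/* e.g. a 4096-entry queue encodes as 12 */
}
#endif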
void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error, dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  " NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
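
/* A worked example of the buftbl_min arithmetic above, as a sketch only
 * (the concrete buffer size is an assumption, not quoted from the
 * headers): with 4 KiB special buffers and 8-byte descriptors, a
 * 4096-entry DMA queue costs 4096 * 8 / 4096 == 8 buffer table entries;
 * buftbl_min is that cost summed over every RX queue, TX queue and
 * event queue.
 */
#if 0
static unsigned int buftbl_entries_for_queue(unsigned int q_entries)
{
	return q_entries * sizeof(efx_qword_t) / EFX_BUF_SIZE;
}
#endif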
u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_farch_rx_push_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
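
/* To illustrate how these combine (a sketch, not driver logic): a
 * wildcard type whose deepest live entry sits at depth 5 is programmed
 * into the hardware as 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD == 8,
 * in efx_farch_filter_push_rx_config() below.
 */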
enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};

enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};

struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};

struct efx_farch_filter_state {
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
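
/* Since key * 2 - 1 is always odd, the increment is coprime to the
 * power-of-two table size, so the probe sequence visits every slot
 * before repeating. A sketch of the probe loop as used by
 * efx_farch_filter_insert() below:
 */
#if 0
	i = efx_farch_filter_hash(key) & (table->size - 1);
	for (;;) {
		/* ...test slot i for a match or a free entry... */
		i = (i + efx_farch_filter_increment(key)) & (table->size - 1);
	}
#endif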
static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
	    gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		/* fall through */
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);
		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		/* fall through */
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
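
/* The resulting IP-filter word layout, read straight off the
 * assignments above:
 *   data[0] = low 16 bits of host1, then port1
 *   data[1] = port2, then high 16 bits of host1
 *   data[2] = host2
 * efx_farch_filter_to_gen_spec() below performs the exact inverse.
 */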
static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}
		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}
static void
efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
				   struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_REQUIRED;
	spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
		       (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}
/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT	5

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
	[EFX_FARCH_FILTER_UC_DEF]	= 4,
	[EFX_FARCH_FILTER_MC_DEF]	= 4,
};

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EFX_FARCH_FILTER_TABLE_RX_IP,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)

static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
		return efx_farch_filter_range_table[range];
	else
		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}
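
/* A worked example of the ID packing (values illustrative): an RX MAC
 * wildcard filter has match priority 3, so at table index 10 its
 * external ID is 3 << EFX_FARCH_FILTER_INDEX_WIDTH | 10 == 24586; the
 * two helpers above recover the table and the index from that value.
 */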
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}
s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&efx->filter_lock);
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec.priority < saved_spec->priority &&
		    !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
		      saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
			rc = -EPERM;
			goto out;
		}
		if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
			/* Just make sure it won't be removed */
			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
			rc = 0;
			goto out;
		}
		/* Retain the RX_STACK flag */
		spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;	/* static, so always all-zeroes */

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}
static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority > priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
		efx_farch_filter_init_rx_for_stack(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}
int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	struct efx_farch_filter_spec *spec;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);
	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);
}

void efx_farch_filter_clear_rx(struct efx_nic *efx,
			       enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	return count;
}
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}

out:
	spin_unlock_bh(&efx->filter_lock);

	return count;
}
/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}
int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
		table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
		table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
		table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_for_stack(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	spin_unlock_bh(&efx->filter_lock);
}
#ifdef CONFIG_RFS_ACCEL

s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				struct efx_filter_spec *gen_spec)
{
	return efx_farch_filter_insert(efx, gen_spec, true);
}

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table =
		&state->table[EFX_FARCH_FILTER_TABLE_RX_IP];

	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		efx_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}

	return false;
}

#endif /* CONFIG_RFS_ACCEL */
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}
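
/* A sketch of the bit selection above (assuming a 256-entry hash, which
 * is what the hard-coded bit 0xff implies): the broadcast address hashes
 * to CRC 0xbe2612ff, so the chosen bit would be
 * 0xbe2612ff & (EFX_MCAST_HASH_ENTRIES - 1) == 0xff, i.e. the same bit
 * that the non-promiscuous branch sets unconditionally.
 */
#if 0
	crc = ether_crc_le(ETH_ALEN, net_dev->broadcast);
	bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);	/* == 0xff */
#endif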