  1. /*****************************************************************************
  2. * *
  3. * File: sge.c *
  4. * $Revision: 1.26 $ *
  5. * $Date: 2005/06/21 18:29:48 $ *
  6. * Description: *
  7. * DMA engine. *
  8. * part of the Chelsio 10Gb Ethernet Driver. *
  9. * *
  10. * This program is free software; you can redistribute it and/or modify *
  11. * it under the terms of the GNU General Public License, version 2, as *
  12. * published by the Free Software Foundation. *
  13. * *
  14. * You should have received a copy of the GNU General Public License along *
  15. * with this program; if not, write to the Free Software Foundation, Inc., *
  16. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  17. * *
  18. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
  19. * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
  20. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
  21. * *
  22. * http://www.chelsio.com *
  23. * *
  24. * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
  25. * All rights reserved. *
  26. * *
  27. * Maintainers: maintainers@chelsio.com *
  28. * *
  29. * Authors: Dimitrios Michailidis <dm@chelsio.com> *
  30. * Tina Yang <tainay@chelsio.com> *
  31. * Felix Marti <felix@chelsio.com> *
  32. * Scott Bardone <sbardone@chelsio.com> *
  33. * Kurt Ottaway <kottaway@chelsio.com> *
  34. * Frank DiMambro <frank@chelsio.com> *
  35. * *
  36. * History: *
  37. * *
  38. ****************************************************************************/
  39. #include "common.h"
  40. #include <linux/types.h>
  41. #include <linux/errno.h>
  42. #include <linux/pci.h>
  43. #include <linux/ktime.h>
  44. #include <linux/netdevice.h>
  45. #include <linux/etherdevice.h>
  46. #include <linux/if_vlan.h>
  47. #include <linux/skbuff.h>
  48. #include <linux/init.h>
  49. #include <linux/mm.h>
  50. #include <linux/tcp.h>
  51. #include <linux/ip.h>
  52. #include <linux/in.h>
  53. #include <linux/if_arp.h>
  54. #include <linux/slab.h>
  55. #include "cpl5_cmd.h"
  56. #include "sge.h"
  57. #include "regs.h"
  58. #include "espi.h"
  59. /* This belongs in if_ether.h */
  60. #define ETH_P_CPL5 0xf
  61. #define SGE_CMDQ_N 2
  62. #define SGE_FREELQ_N 2
  63. #define SGE_CMDQ0_E_N 1024
  64. #define SGE_CMDQ1_E_N 128
  65. #define SGE_FREEL_SIZE 4096
  66. #define SGE_JUMBO_FREEL_SIZE 512
  67. #define SGE_FREEL_REFILL_THRESH 16
  68. #define SGE_RESPQ_E_N 1024
  69. #define SGE_INTRTIMER_NRES 1000
  70. #define SGE_RX_SM_BUF_SIZE 1536
  71. #define SGE_TX_DESC_MAX_PLEN 16384
  72. #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
  73. /*
  74. * Period of the TX buffer reclaim timer. This timer does not need to run
  75. * frequently as TX buffers are usually reclaimed by new TX packets.
  76. */
  77. #define TX_RECLAIM_PERIOD (HZ / 4)
  78. #define M_CMD_LEN 0x7fffffff
  79. #define V_CMD_LEN(v) (v)
  80. #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
  81. #define V_CMD_GEN1(v) ((v) << 31)
  82. #define V_CMD_GEN2(v) (v)
  83. #define F_CMD_DATAVALID (1 << 1)
  84. #define F_CMD_SOP (1 << 2)
  85. #define V_CMD_EOP(v) ((v) << 3)
  86. /*
  87. * Command queue, receive buffer list, and response queue descriptors.
  88. */
  89. #if defined(__BIG_ENDIAN_BITFIELD)
  90. struct cmdQ_e {
  91. u32 addr_lo;
  92. u32 len_gen;
  93. u32 flags;
  94. u32 addr_hi;
  95. };
  96. struct freelQ_e {
  97. u32 addr_lo;
  98. u32 len_gen;
  99. u32 gen2;
  100. u32 addr_hi;
  101. };
  102. struct respQ_e {
  103. u32 Qsleeping : 4;
  104. u32 Cmdq1CreditReturn : 5;
  105. u32 Cmdq1DmaComplete : 5;
  106. u32 Cmdq0CreditReturn : 5;
  107. u32 Cmdq0DmaComplete : 5;
  108. u32 FreelistQid : 2;
  109. u32 CreditValid : 1;
  110. u32 DataValid : 1;
  111. u32 Offload : 1;
  112. u32 Eop : 1;
  113. u32 Sop : 1;
  114. u32 GenerationBit : 1;
  115. u32 BufferLength;
  116. };
  117. #elif defined(__LITTLE_ENDIAN_BITFIELD)
  118. struct cmdQ_e {
  119. u32 len_gen;
  120. u32 addr_lo;
  121. u32 addr_hi;
  122. u32 flags;
  123. };
  124. struct freelQ_e {
  125. u32 len_gen;
  126. u32 addr_lo;
  127. u32 addr_hi;
  128. u32 gen2;
  129. };
  130. struct respQ_e {
  131. u32 BufferLength;
  132. u32 GenerationBit : 1;
  133. u32 Sop : 1;
  134. u32 Eop : 1;
  135. u32 Offload : 1;
  136. u32 DataValid : 1;
  137. u32 CreditValid : 1;
  138. u32 FreelistQid : 2;
  139. u32 Cmdq0DmaComplete : 5;
  140. u32 Cmdq0CreditReturn : 5;
  141. u32 Cmdq1DmaComplete : 5;
  142. u32 Cmdq1CreditReturn : 5;
  143. u32 Qsleeping : 4;
  144. };
  145. #endif
  146. /*
  147. * SW Context Command and Freelist Queue Descriptors
  148. */
  149. struct cmdQ_ce {
  150. struct sk_buff *skb;
  151. DEFINE_DMA_UNMAP_ADDR(dma_addr);
  152. DEFINE_DMA_UNMAP_LEN(dma_len);
  153. };
  154. struct freelQ_ce {
  155. struct sk_buff *skb;
  156. DEFINE_DMA_UNMAP_ADDR(dma_addr);
  157. DEFINE_DMA_UNMAP_LEN(dma_len);
  158. };
  159. /*
  160. * SW command, freelist and response rings
  161. */
  162. struct cmdQ {
  163. unsigned long status; /* HW DMA fetch status */
  164. unsigned int in_use; /* # of in-use command descriptors */
  165. unsigned int size; /* # of descriptors */
  166. unsigned int processed; /* total # of descs HW has processed */
  167. unsigned int cleaned; /* total # of descs SW has reclaimed */
  168. unsigned int stop_thres; /* SW TX queue suspend threshold */
  169. u16 pidx; /* producer index (SW) */
  170. u16 cidx; /* consumer index (HW) */
  171. u8 genbit; /* current generation (=valid) bit */
  172. u8 sop; /* is next entry start of packet? */
  173. struct cmdQ_e *entries; /* HW command descriptor Q */
  174. struct cmdQ_ce *centries; /* SW command context descriptor Q */
  175. dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
  176. spinlock_t lock; /* Lock to protect cmdQ enqueuing */
  177. };
  178. struct freelQ {
  179. unsigned int credits; /* # of available RX buffers */
  180. unsigned int size; /* free list capacity */
  181. u16 pidx; /* producer index (SW) */
  182. u16 cidx; /* consumer index (HW) */
  183. u16 rx_buffer_size; /* Buffer size on this free list */
  184. u16 dma_offset; /* DMA offset to align IP headers */
  185. u16 recycleq_idx; /* skb recycle q to use */
  186. u8 genbit; /* current generation (=valid) bit */
  187. struct freelQ_e *entries; /* HW freelist descriptor Q */
  188. struct freelQ_ce *centries; /* SW freelist context descriptor Q */
  189. dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
  190. };
  191. struct respQ {
  192. unsigned int credits; /* credits to be returned to SGE */
  193. unsigned int size; /* # of response Q descriptors */
  194. u16 cidx; /* consumer index (SW) */
  195. u8 genbit; /* current generation(=valid) bit */
  196. struct respQ_e *entries; /* HW response descriptor Q */
  197. dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
  198. };
  199. /* Bit flags for cmdQ.status */
  200. enum {
  201. CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
  202. CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
  203. };
  204. /* T204 TX SW scheduler */
  205. /* Per T204 TX port */
  206. struct sched_port {
  207. unsigned int avail; /* available bits - quota */
  208. unsigned int drain_bits_per_1024ns; /* drain rate */
  209. unsigned int speed; /* drain rate, mbps */
  210. unsigned int mtu; /* mtu size */
  211. struct sk_buff_head skbq; /* pending skbs */
  212. };
  213. /* Per T204 device */
  214. struct sched {
  215. ktime_t last_updated; /* last time quotas were computed */
  216. unsigned int max_avail; /* max bits to be sent to any port */
  217. unsigned int port; /* port index (round robin ports) */
  218. unsigned int num; /* num skbs in per port queues */
  219. struct sched_port p[MAX_NPORTS];
  220. struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
  221. };
  222. static void restart_sched(unsigned long);
  223. /*
  224. * Main SGE data structure
  225. *
  226. * Interrupts are handled by a single CPU and it is likely that on an MP system
  227. * the application is migrated to another CPU. In that scenario, we try to
  228. * separate the RX (in irq context) and TX state in order to decrease memory
  229. * contention.
  230. */
  231. struct sge {
  232. struct adapter *adapter; /* adapter backpointer */
  233. struct net_device *netdev; /* netdevice backpointer */
  234. struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
  235. struct respQ respQ; /* response Q */
  236. unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
  237. unsigned int rx_pkt_pad; /* RX padding for L2 packets */
  238. unsigned int jumbo_fl; /* jumbo freelist Q index */
  239. unsigned int intrtimer_nres; /* no-resource interrupt timer */
  240. unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
  241. struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
  242. struct timer_list espibug_timer;
  243. unsigned long espibug_timeout;
  244. struct sk_buff *espibug_skb[MAX_NPORTS];
  245. u32 sge_control; /* shadow value of sge control reg */
  246. struct sge_intr_counts stats;
  247. struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
  248. struct sched *tx_sched;
  249. struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
  250. };
  251. static const u8 ch_mac_addr[ETH_ALEN] = {
  252. 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
  253. };
  254. /*
  255. * stop tasklet and free all pending skb's
  256. */
  257. static void tx_sched_stop(struct sge *sge)
  258. {
  259. struct sched *s = sge->tx_sched;
  260. int i;
  261. tasklet_kill(&s->sched_tsk);
  262. for (i = 0; i < MAX_NPORTS; i++)
  263. __skb_queue_purge(&s->p[i].skbq);
  264. }
  265. /*
  266. * t1_sched_update_parms() is called when the MTU or link speed changes. It
  267. * re-computes scheduler parameters to cope with the change.
  268. */
  269. unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
  270. unsigned int mtu, unsigned int speed)
  271. {
  272. struct sched *s = sge->tx_sched;
  273. struct sched_port *p = &s->p[port];
  274. unsigned int max_avail_segs;
  275. pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
  276. if (speed)
  277. p->speed = speed;
  278. if (mtu)
  279. p->mtu = mtu;
  280. if (speed || mtu) {
  281. unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
  282. do_div(drain, (p->mtu + 50) * 1000);
  283. p->drain_bits_per_1024ns = (unsigned int) drain;
  284. if (p->speed < 1000)
  285. p->drain_bits_per_1024ns =
  286. 90 * p->drain_bits_per_1024ns / 100;
  287. }
  288. if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
  289. p->drain_bits_per_1024ns -= 16;
  290. s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
  291. max_avail_segs = max(1U, 4096 / (p->mtu - 40));
  292. } else {
  293. s->max_avail = 16384;
  294. max_avail_segs = max(1U, 9000 / (p->mtu - 40));
  295. }
  296. pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
  297. "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
  298. p->speed, s->max_avail, max_avail_segs,
  299. p->drain_bits_per_1024ns);
  300. return max_avail_segs * (p->mtu - 40);
  301. }
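/*
 * Illustrative example of the drain-rate computation above, assuming
 * mtu = 1500 and speed = 1000 (1 Gb/s):
 *
 *   drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000)
 *         = 1495040000 / 1550000
 *         ~= 964 bits per 1024 ns
 *
 * i.e. just under one bit per nanosecond, which approximates 1 Gb/s of
 * payload once per-packet header overhead is discounted.
 */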
  302. #if 0
  303. /*
  304. * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
  305. * data that can be pushed per port.
  306. */
  307. void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
  308. {
  309. struct sched *s = sge->tx_sched;
  310. unsigned int i;
  311. s->max_avail = val;
  312. for (i = 0; i < MAX_NPORTS; i++)
  313. t1_sched_update_parms(sge, i, 0, 0);
  314. }
  315. /*
  316. * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
  317. * is draining.
  318. */
  319. void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
  320. unsigned int val)
  321. {
  322. struct sched *s = sge->tx_sched;
  323. struct sched_port *p = &s->p[port];
  324. p->drain_bits_per_1024ns = val * 1024 / 1000;
  325. t1_sched_update_parms(sge, port, 0, 0);
  326. }
  327. #endif /* 0 */
  328. /*
  329. * get_clock() implements a ns clock (see ktime_get)
  330. */
  331. static inline ktime_t get_clock(void)
  332. {
  333. struct timespec ts;
  334. ktime_get_ts(&ts);
  335. return timespec_to_ktime(ts);
  336. }
  337. /*
  338. * tx_sched_init() allocates resources and does basic initialization.
  339. */
  340. static int tx_sched_init(struct sge *sge)
  341. {
  342. struct sched *s;
  343. int i;
  344. s = kzalloc(sizeof (struct sched), GFP_KERNEL);
  345. if (!s)
  346. return -ENOMEM;
  347. pr_debug("tx_sched_init\n");
  348. tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
  349. sge->tx_sched = s;
  350. for (i = 0; i < MAX_NPORTS; i++) {
  351. skb_queue_head_init(&s->p[i].skbq);
  352. t1_sched_update_parms(sge, i, 1500, 1000);
  353. }
  354. return 0;
  355. }
  356. /*
  357. * sched_update_avail() computes the delta since the last time it was called
  358. * and updates the per port quota (number of bits that can be sent to any
  359. * port).
  360. */
  361. static inline int sched_update_avail(struct sge *sge)
  362. {
  363. struct sched *s = sge->tx_sched;
  364. ktime_t now = get_clock();
  365. unsigned int i;
  366. long long delta_time_ns;
  367. delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
  368. pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
  369. if (delta_time_ns < 15000)
  370. return 0;
  371. for (i = 0; i < MAX_NPORTS; i++) {
  372. struct sched_port *p = &s->p[i];
  373. unsigned int delta_avail;
  374. delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
  375. p->avail = min(p->avail + delta_avail, s->max_avail);
  376. }
  377. s->last_updated = now;
  378. return 1;
  379. }
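/*
 * A note on the ">> 13" above: it divides by 8192 = 1024 * 8, i.e. it
 * scales the per-1024ns drain rate by the elapsed nanoseconds and then
 * converts bits to bytes, so the quota ends up in the same units that
 * sched_skb() compares against skb->len. Rough example, assuming a drain
 * rate of 964 bits per 1024 ns and the minimum 15 us update interval:
 *
 *   (964 * 15000) >> 13 ~= 1765 bytes added to that port's quota.
 */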
  380. /*
  381. * sched_skb() is called from two different places. In the tx path, any
  382. * packet generating load on an output port will call sched_skb()
  383. * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
  384. * context (skb == NULL).
  385. * The scheduler only returns a skb (which will then be sent) if the
  386. * length of the skb is <= the current quota of the output port.
  387. */
  388. static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
  389. unsigned int credits)
  390. {
  391. struct sched *s = sge->tx_sched;
  392. struct sk_buff_head *skbq;
  393. unsigned int i, len, update = 1;
  394. pr_debug("sched_skb %p\n", skb);
  395. if (!skb) {
  396. if (!s->num)
  397. return NULL;
  398. } else {
  399. skbq = &s->p[skb->dev->if_port].skbq;
  400. __skb_queue_tail(skbq, skb);
  401. s->num++;
  402. skb = NULL;
  403. }
  404. if (credits < MAX_SKB_FRAGS + 1)
  405. goto out;
  406. again:
  407. for (i = 0; i < MAX_NPORTS; i++) {
  408. s->port = (s->port + 1) & (MAX_NPORTS - 1);
  409. skbq = &s->p[s->port].skbq;
  410. skb = skb_peek(skbq);
  411. if (!skb)
  412. continue;
  413. len = skb->len;
  414. if (len <= s->p[s->port].avail) {
  415. s->p[s->port].avail -= len;
  416. s->num--;
  417. __skb_unlink(skb, skbq);
  418. goto out;
  419. }
  420. skb = NULL;
  421. }
  422. if (update-- && sched_update_avail(sge))
  423. goto again;
  424. out:
  425. /* If there are more pending skbs, we use the hardware to schedule us
  426. * again.
  427. */
  428. if (s->num && !skb) {
  429. struct cmdQ *q = &sge->cmdQ[0];
  430. clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  431. if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
  432. set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  433. writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
  434. }
  435. }
  436. pr_debug("sched_skb ret %p\n", skb);
  437. return skb;
  438. }
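/*
 * Summary of the dequeue policy above: ports are scanned round robin and
 * at most one skb is handed back per call, and only if its length fits
 * within that port's current quota. If nothing fits, the quotas are
 * refreshed once via sched_update_avail() and the scan is retried; if
 * skbs remain queued afterwards, the cmdQ0 doorbell is rung so the
 * hardware will eventually trigger another scheduling pass.
 */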
  439. /*
  440. * PIO to indicate that memory mapped Q contains valid descriptor(s).
  441. */
  442. static inline void doorbell_pio(struct adapter *adapter, u32 val)
  443. {
  444. wmb();
  445. writel(val, adapter->regs + A_SG_DOORBELL);
  446. }
  447. /*
  448. * Frees all RX buffers on the freelist Q. The caller must make sure that
  449. * the SGE is turned off before calling this function.
  450. */
  451. static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
  452. {
  453. unsigned int cidx = q->cidx;
  454. while (q->credits--) {
  455. struct freelQ_ce *ce = &q->centries[cidx];
  456. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  457. dma_unmap_len(ce, dma_len),
  458. PCI_DMA_FROMDEVICE);
  459. dev_kfree_skb(ce->skb);
  460. ce->skb = NULL;
  461. if (++cidx == q->size)
  462. cidx = 0;
  463. }
  464. }
  465. /*
  466. * Free RX free list and response queue resources.
  467. */
  468. static void free_rx_resources(struct sge *sge)
  469. {
  470. struct pci_dev *pdev = sge->adapter->pdev;
  471. unsigned int size, i;
  472. if (sge->respQ.entries) {
  473. size = sizeof(struct respQ_e) * sge->respQ.size;
  474. pci_free_consistent(pdev, size, sge->respQ.entries,
  475. sge->respQ.dma_addr);
  476. }
  477. for (i = 0; i < SGE_FREELQ_N; i++) {
  478. struct freelQ *q = &sge->freelQ[i];
  479. if (q->centries) {
  480. free_freelQ_buffers(pdev, q);
  481. kfree(q->centries);
  482. }
  483. if (q->entries) {
  484. size = sizeof(struct freelQ_e) * q->size;
  485. pci_free_consistent(pdev, size, q->entries,
  486. q->dma_addr);
  487. }
  488. }
  489. }
  490. /*
  491. * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
  492. * response queue.
  493. */
  494. static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
  495. {
  496. struct pci_dev *pdev = sge->adapter->pdev;
  497. unsigned int size, i;
  498. for (i = 0; i < SGE_FREELQ_N; i++) {
  499. struct freelQ *q = &sge->freelQ[i];
  500. q->genbit = 1;
  501. q->size = p->freelQ_size[i];
  502. q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
  503. size = sizeof(struct freelQ_e) * q->size;
  504. q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
  505. if (!q->entries)
  506. goto err_no_mem;
  507. size = sizeof(struct freelQ_ce) * q->size;
  508. q->centries = kzalloc(size, GFP_KERNEL);
  509. if (!q->centries)
  510. goto err_no_mem;
  511. }
  512. /*
  513. * Calculate the buffer sizes for the two free lists. FL0 accommodates
  514. * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
  515. * including all the sk_buff overhead.
  516. *
  517. * Note: For T2 FL0 and FL1 are reversed.
  518. */
  519. sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
  520. sizeof(struct cpl_rx_data) +
  521. sge->freelQ[!sge->jumbo_fl].dma_offset;
  522. size = (16 * 1024) -
  523. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  524. sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
  525. /*
  526. * Setup which skb recycle Q should be used when recycling buffers from
  527. * each free list.
  528. */
  529. sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
  530. sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
  531. sge->respQ.genbit = 1;
  532. sge->respQ.size = SGE_RESPQ_E_N;
  533. sge->respQ.credits = 0;
  534. size = sizeof(struct respQ_e) * sge->respQ.size;
  535. sge->respQ.entries =
  536. pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
  537. if (!sge->respQ.entries)
  538. goto err_no_mem;
  539. return 0;
  540. err_no_mem:
  541. free_rx_resources(sge);
  542. return -ENOMEM;
  543. }
  544. /*
  545. * Reclaims n TX descriptors and frees the buffers associated with them.
  546. */
  547. static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
  548. {
  549. struct cmdQ_ce *ce;
  550. struct pci_dev *pdev = sge->adapter->pdev;
  551. unsigned int cidx = q->cidx;
  552. q->in_use -= n;
  553. ce = &q->centries[cidx];
  554. while (n--) {
  555. if (likely(dma_unmap_len(ce, dma_len))) {
  556. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  557. dma_unmap_len(ce, dma_len),
  558. PCI_DMA_TODEVICE);
  559. if (q->sop)
  560. q->sop = 0;
  561. }
  562. if (ce->skb) {
  563. dev_kfree_skb_any(ce->skb);
  564. q->sop = 1;
  565. }
  566. ce++;
  567. if (++cidx == q->size) {
  568. cidx = 0;
  569. ce = q->centries;
  570. }
  571. }
  572. q->cidx = cidx;
  573. }
  574. /*
  575. * Free TX resources.
  576. *
  577. * Assumes that SGE is stopped and all interrupts are disabled.
  578. */
  579. static void free_tx_resources(struct sge *sge)
  580. {
  581. struct pci_dev *pdev = sge->adapter->pdev;
  582. unsigned int size, i;
  583. for (i = 0; i < SGE_CMDQ_N; i++) {
  584. struct cmdQ *q = &sge->cmdQ[i];
  585. if (q->centries) {
  586. if (q->in_use)
  587. free_cmdQ_buffers(sge, q, q->in_use);
  588. kfree(q->centries);
  589. }
  590. if (q->entries) {
  591. size = sizeof(struct cmdQ_e) * q->size;
  592. pci_free_consistent(pdev, size, q->entries,
  593. q->dma_addr);
  594. }
  595. }
  596. }
  597. /*
  598. * Allocates basic TX resources, consisting of memory mapped command Qs.
  599. */
  600. static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
  601. {
  602. struct pci_dev *pdev = sge->adapter->pdev;
  603. unsigned int size, i;
  604. for (i = 0; i < SGE_CMDQ_N; i++) {
  605. struct cmdQ *q = &sge->cmdQ[i];
  606. q->genbit = 1;
  607. q->sop = 1;
  608. q->size = p->cmdQ_size[i];
  609. q->in_use = 0;
  610. q->status = 0;
  611. q->processed = q->cleaned = 0;
  612. q->stop_thres = 0;
  613. spin_lock_init(&q->lock);
  614. size = sizeof(struct cmdQ_e) * q->size;
  615. q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
  616. if (!q->entries)
  617. goto err_no_mem;
  618. size = sizeof(struct cmdQ_ce) * q->size;
  619. q->centries = kzalloc(size, GFP_KERNEL);
  620. if (!q->centries)
  621. goto err_no_mem;
  622. }
  623. /*
  624. * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
  625. * only. For queue 0 set the stop threshold so we can handle one more
  626. * packet from each port, plus reserve an additional 24 entries for
  627. * Ethernet packets only. Queue 1 never suspends nor do we reserve
  628. * space for Ethernet packets.
  629. */
  630. sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
  631. (MAX_SKB_FRAGS + 1);
  632. return 0;
  633. err_no_mem:
  634. free_tx_resources(sge);
  635. return -ENOMEM;
  636. }
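/*
 * Worked example for the suspend threshold above: on a two-port adapter,
 * if MAX_SKB_FRAGS were 17, stop_thres = 2 * (17 + 1) = 36 descriptors,
 * i.e. the Tx queue is stopped while there is still room for one
 * maximally fragmented packet per port.
 */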
  637. static inline void setup_ring_params(struct adapter *adapter, u64 addr,
  638. u32 size, int base_reg_lo,
  639. int base_reg_hi, int size_reg)
  640. {
  641. writel((u32)addr, adapter->regs + base_reg_lo);
  642. writel(addr >> 32, adapter->regs + base_reg_hi);
  643. writel(size, adapter->regs + size_reg);
  644. }
  645. /*
  646. * Enable/disable VLAN acceleration.
  647. */
  648. void t1_set_vlan_accel(struct adapter *adapter, int on_off)
  649. {
  650. struct sge *sge = adapter->sge;
  651. sge->sge_control &= ~F_VLAN_XTRACT;
  652. if (on_off)
  653. sge->sge_control |= F_VLAN_XTRACT;
  654. if (adapter->open_device_map) {
  655. writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
  656. readl(adapter->regs + A_SG_CONTROL); /* flush */
  657. }
  658. }
  659. /*
  660. * Programs the various SGE registers. The engine is not yet enabled, but
  661. * sge->sge_control is set up and ready to go.
  662. */
  663. static void configure_sge(struct sge *sge, struct sge_params *p)
  664. {
  665. struct adapter *ap = sge->adapter;
  666. writel(0, ap->regs + A_SG_CONTROL);
  667. setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
  668. A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
  669. setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
  670. A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
  671. setup_ring_params(ap, sge->freelQ[0].dma_addr,
  672. sge->freelQ[0].size, A_SG_FL0BASELWR,
  673. A_SG_FL0BASEUPR, A_SG_FL0SIZE);
  674. setup_ring_params(ap, sge->freelQ[1].dma_addr,
  675. sge->freelQ[1].size, A_SG_FL1BASELWR,
  676. A_SG_FL1BASEUPR, A_SG_FL1SIZE);
  677. /* The threshold comparison uses <. */
  678. writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
  679. setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
  680. A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
  681. writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
  682. sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
  683. F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
  684. V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
  685. V_RX_PKT_OFFSET(sge->rx_pkt_pad);
  686. #if defined(__BIG_ENDIAN_BITFIELD)
  687. sge->sge_control |= F_ENABLE_BIG_ENDIAN;
  688. #endif
  689. /* Initialize no-resource timer */
  690. sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
  691. t1_sge_set_coalesce_params(sge, p);
  692. }
  693. /*
  694. * Return the payload capacity of the jumbo free-list buffers.
  695. */
  696. static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
  697. {
  698. return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
  699. sge->freelQ[sge->jumbo_fl].dma_offset -
  700. sizeof(struct cpl_rx_data);
  701. }
  702. /*
  703. * Frees all SGE related resources and the sge structure itself
  704. */
  705. void t1_sge_destroy(struct sge *sge)
  706. {
  707. int i;
  708. for_each_port(sge->adapter, i)
  709. free_percpu(sge->port_stats[i]);
  710. kfree(sge->tx_sched);
  711. free_tx_resources(sge);
  712. free_rx_resources(sge);
  713. kfree(sge);
  714. }
  715. /*
  716. * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
  717. * context Q) until the Q is full or alloc_skb fails.
  718. *
  719. * It is possible that the generation bits already match, indicating that the
  720. * buffer is already valid and nothing needs to be done. This happens when we
  721. * copied a received buffer into a new sk_buff during the interrupt processing.
  722. *
  723. * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
  724. * we specify a RX_OFFSET in order to make sure that the IP header is 4B
  725. * aligned.
  726. */
  727. static void refill_free_list(struct sge *sge, struct freelQ *q)
  728. {
  729. struct pci_dev *pdev = sge->adapter->pdev;
  730. struct freelQ_ce *ce = &q->centries[q->pidx];
  731. struct freelQ_e *e = &q->entries[q->pidx];
  732. unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
  733. while (q->credits < q->size) {
  734. struct sk_buff *skb;
  735. dma_addr_t mapping;
  736. skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
  737. if (!skb)
  738. break;
  739. skb_reserve(skb, q->dma_offset);
  740. mapping = pci_map_single(pdev, skb->data, dma_len,
  741. PCI_DMA_FROMDEVICE);
  742. skb_reserve(skb, sge->rx_pkt_pad);
  743. ce->skb = skb;
  744. dma_unmap_addr_set(ce, dma_addr, mapping);
  745. dma_unmap_len_set(ce, dma_len, dma_len);
  746. e->addr_lo = (u32)mapping;
  747. e->addr_hi = (u64)mapping >> 32;
  748. e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
  749. wmb();
  750. e->gen2 = V_CMD_GEN2(q->genbit);
  751. e++;
  752. ce++;
  753. if (++q->pidx == q->size) {
  754. q->pidx = 0;
  755. q->genbit ^= 1;
  756. ce = q->centries;
  757. e = q->entries;
  758. }
  759. q->credits++;
  760. }
  761. }
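/*
 * Note on the write ordering above: len_gen (carrying the first copy of
 * the generation bit) is written before the wmb(), and gen2 after it.
 * A free-list entry is only treated as valid once both generation fields
 * match the queue's current generation, so the barrier keeps the hardware
 * from picking up a half-written descriptor.
 */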
  762. /*
  763. * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
  764. * of both rings, we go into 'few interrupt mode' in order to give the system
  765. * time to free up resources.
  766. */
  767. static void freelQs_empty(struct sge *sge)
  768. {
  769. struct adapter *adapter = sge->adapter;
  770. u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
  771. u32 irqholdoff_reg;
  772. refill_free_list(sge, &sge->freelQ[0]);
  773. refill_free_list(sge, &sge->freelQ[1]);
  774. if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
  775. sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
  776. irq_reg |= F_FL_EXHAUSTED;
  777. irqholdoff_reg = sge->fixed_intrtimer;
  778. } else {
  779. /* Clear the F_FL_EXHAUSTED interrupts for now */
  780. irq_reg &= ~F_FL_EXHAUSTED;
  781. irqholdoff_reg = sge->intrtimer_nres;
  782. }
  783. writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
  784. writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
  785. /* We reenable the Qs to force a freelist GTS interrupt later */
  786. doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
  787. }
  788. #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
  789. #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
  790. #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
  791. F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
  792. /*
  793. * Disable SGE Interrupts
  794. */
  795. void t1_sge_intr_disable(struct sge *sge)
  796. {
  797. u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
  798. writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
  799. writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
  800. }
  801. /*
  802. * Enable SGE interrupts.
  803. */
  804. void t1_sge_intr_enable(struct sge *sge)
  805. {
  806. u32 en = SGE_INT_ENABLE;
  807. u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
  808. if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
  809. en &= ~F_PACKET_TOO_BIG;
  810. writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
  811. writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
  812. }
  813. /*
  814. * Clear SGE interrupts.
  815. */
  816. void t1_sge_intr_clear(struct sge *sge)
  817. {
  818. writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
  819. writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
  820. }
  821. /*
  822. * SGE 'Error' interrupt handler
  823. */
  824. int t1_sge_intr_error_handler(struct sge *sge)
  825. {
  826. struct adapter *adapter = sge->adapter;
  827. u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
  828. if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
  829. cause &= ~F_PACKET_TOO_BIG;
  830. if (cause & F_RESPQ_EXHAUSTED)
  831. sge->stats.respQ_empty++;
  832. if (cause & F_RESPQ_OVERFLOW) {
  833. sge->stats.respQ_overflow++;
  834. pr_alert("%s: SGE response queue overflow\n",
  835. adapter->name);
  836. }
  837. if (cause & F_FL_EXHAUSTED) {
  838. sge->stats.freelistQ_empty++;
  839. freelQs_empty(sge);
  840. }
  841. if (cause & F_PACKET_TOO_BIG) {
  842. sge->stats.pkt_too_big++;
  843. pr_alert("%s: SGE max packet size exceeded\n",
  844. adapter->name);
  845. }
  846. if (cause & F_PACKET_MISMATCH) {
  847. sge->stats.pkt_mismatch++;
  848. pr_alert("%s: SGE packet mismatch\n", adapter->name);
  849. }
  850. if (cause & SGE_INT_FATAL)
  851. t1_fatal_err(adapter);
  852. writel(cause, adapter->regs + A_SG_INT_CAUSE);
  853. return 0;
  854. }
  855. const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
  856. {
  857. return &sge->stats;
  858. }
  859. void t1_sge_get_port_stats(const struct sge *sge, int port,
  860. struct sge_port_stats *ss)
  861. {
  862. int cpu;
  863. memset(ss, 0, sizeof(*ss));
  864. for_each_possible_cpu(cpu) {
  865. struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
  866. ss->rx_cso_good += st->rx_cso_good;
  867. ss->tx_cso += st->tx_cso;
  868. ss->tx_tso += st->tx_tso;
  869. ss->tx_need_hdrroom += st->tx_need_hdrroom;
  870. ss->vlan_xtract += st->vlan_xtract;
  871. ss->vlan_insert += st->vlan_insert;
  872. }
  873. }
  874. /**
  875. * recycle_fl_buf - recycle a free list buffer
  876. * @fl: the free list
  877. * @idx: index of buffer to recycle
  878. *
  879. * Recycles the specified buffer on the given free list by adding it at
  880. * the next available slot on the list.
  881. */
  882. static void recycle_fl_buf(struct freelQ *fl, int idx)
  883. {
  884. struct freelQ_e *from = &fl->entries[idx];
  885. struct freelQ_e *to = &fl->entries[fl->pidx];
  886. fl->centries[fl->pidx] = fl->centries[idx];
  887. to->addr_lo = from->addr_lo;
  888. to->addr_hi = from->addr_hi;
  889. to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
  890. wmb();
  891. to->gen2 = V_CMD_GEN2(fl->genbit);
  892. fl->credits++;
  893. if (++fl->pidx == fl->size) {
  894. fl->pidx = 0;
  895. fl->genbit ^= 1;
  896. }
  897. }
  898. static int copybreak __read_mostly = 256;
  899. module_param(copybreak, int, 0);
  900. MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  901. /**
  902. * get_packet - return the next ingress packet buffer
  903. * @pdev: the PCI device that received the packet
  904. * @fl: the SGE free list holding the packet
  905. * @len: the actual packet length, excluding any SGE padding
  906. *
  907. * Get the next packet from a free list and complete setup of the
  908. * sk_buff. If the packet is small we make a copy and recycle the
  909. * original buffer, otherwise we use the original buffer itself. If a
  910. * positive drop threshold is supplied packets are dropped and their
  911. * buffers recycled if (a) the number of remaining buffers is under the
  912. * threshold and the packet is too big to copy, or (b) the packet should
  913. * be copied but there is no memory for the copy.
  914. */
  915. static inline struct sk_buff *get_packet(struct pci_dev *pdev,
  916. struct freelQ *fl, unsigned int len)
  917. {
  918. struct sk_buff *skb;
  919. const struct freelQ_ce *ce = &fl->centries[fl->cidx];
  920. if (len < copybreak) {
  921. skb = alloc_skb(len + 2, GFP_ATOMIC);
  922. if (!skb)
  923. goto use_orig_buf;
  924. skb_reserve(skb, 2); /* align IP header */
  925. skb_put(skb, len);
  926. pci_dma_sync_single_for_cpu(pdev,
  927. dma_unmap_addr(ce, dma_addr),
  928. dma_unmap_len(ce, dma_len),
  929. PCI_DMA_FROMDEVICE);
  930. skb_copy_from_linear_data(ce->skb, skb->data, len);
  931. pci_dma_sync_single_for_device(pdev,
  932. dma_unmap_addr(ce, dma_addr),
  933. dma_unmap_len(ce, dma_len),
  934. PCI_DMA_FROMDEVICE);
  935. recycle_fl_buf(fl, fl->cidx);
  936. return skb;
  937. }
  938. use_orig_buf:
  939. if (fl->credits < 2) {
  940. recycle_fl_buf(fl, fl->cidx);
  941. return NULL;
  942. }
  943. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  944. dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
  945. skb = ce->skb;
  946. prefetch(skb->data);
  947. skb_put(skb, len);
  948. return skb;
  949. }
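/*
 * Usage note: copybreak (default 256, adjustable via the module parameter
 * above) trades a memcpy for buffer reuse. Frames shorter than copybreak
 * are copied into a freshly allocated skb and the original free-list
 * buffer is recycled in place, avoiding an unmap/remap of a large RX
 * buffer for small packets.
 */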
  950. /**
  951. * unexpected_offload - handle an unexpected offload packet
  952. * @adapter: the adapter
  953. * @fl: the free list that received the packet
  954. *
  955. * Called when we receive an unexpected offload packet (e.g., the TOE
  956. * function is disabled or the card is a NIC). Prints a message and
  957. * recycles the buffer.
  958. */
  959. static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
  960. {
  961. struct freelQ_ce *ce = &fl->centries[fl->cidx];
  962. struct sk_buff *skb = ce->skb;
  963. pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
  964. dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
  965. pr_err("%s: unexpected offload packet, cmd %u\n",
  966. adapter->name, *skb->data);
  967. recycle_fl_buf(fl, fl->cidx);
  968. }
  969. /*
  970. * T1/T2 SGE limits the maximum DMA size per TX descriptor to
  971. * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
  972. * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
  973. * Note that the *_large_page_tx_descs stuff will be optimized out when
  974. * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
  975. *
  976. * compute_large_page_descs() computes how many additional descriptors are
  977. * required to break down the stack's request.
  978. */
  979. static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
  980. {
  981. unsigned int count = 0;
  982. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
  983. unsigned int nfrags = skb_shinfo(skb)->nr_frags;
  984. unsigned int i, len = skb_headlen(skb);
  985. while (len > SGE_TX_DESC_MAX_PLEN) {
  986. count++;
  987. len -= SGE_TX_DESC_MAX_PLEN;
  988. }
  989. for (i = 0; nfrags--; i++) {
  990. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  991. len = frag->size;
  992. while (len > SGE_TX_DESC_MAX_PLEN) {
  993. count++;
  994. len -= SGE_TX_DESC_MAX_PLEN;
  995. }
  996. }
  997. }
  998. return count;
  999. }
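/*
 * Illustrative example, assuming a 64 KB PAGE_SIZE: a 65536-byte linear
 * area exceeds SGE_TX_DESC_MAX_PLEN (16 KB), so the loop above counts 3
 * additional descriptors; together with the descriptor the caller already
 * accounts for, that chunk goes out as 4 x 16 KB.
 */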
  1000. /*
  1001. * Write a cmdQ entry.
  1002. *
  1003. * Since this function writes the 'flags' field, it must not be used to
  1004. * write the first cmdQ entry.
  1005. */
  1006. static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
  1007. unsigned int len, unsigned int gen,
  1008. unsigned int eop)
  1009. {
  1010. BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
  1011. e->addr_lo = (u32)mapping;
  1012. e->addr_hi = (u64)mapping >> 32;
  1013. e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
  1014. e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
  1015. }
  1016. /*
  1017. * See comment for previous function.
  1018. *
  1019. * write_large_page_tx_descs() writes additional SGE tx descriptors if
  1020. * *desc_len exceeds HW's capability.
  1021. */
  1022. static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
  1023. struct cmdQ_e **e,
  1024. struct cmdQ_ce **ce,
  1025. unsigned int *gen,
  1026. dma_addr_t *desc_mapping,
  1027. unsigned int *desc_len,
  1028. unsigned int nfrags,
  1029. struct cmdQ *q)
  1030. {
  1031. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
  1032. struct cmdQ_e *e1 = *e;
  1033. struct cmdQ_ce *ce1 = *ce;
  1034. while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
  1035. *desc_len -= SGE_TX_DESC_MAX_PLEN;
  1036. write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
  1037. *gen, nfrags == 0 && *desc_len == 0);
  1038. ce1->skb = NULL;
  1039. dma_unmap_len_set(ce1, dma_len, 0);
  1040. *desc_mapping += SGE_TX_DESC_MAX_PLEN;
  1041. if (*desc_len) {
  1042. ce1++;
  1043. e1++;
  1044. if (++pidx == q->size) {
  1045. pidx = 0;
  1046. *gen ^= 1;
  1047. ce1 = q->centries;
  1048. e1 = q->entries;
  1049. }
  1050. }
  1051. }
  1052. *e = e1;
  1053. *ce = ce1;
  1054. }
  1055. return pidx;
  1056. }
  1057. /*
  1058. * Write the command descriptors to transmit the given skb starting at
  1059. * descriptor pidx with the given generation.
  1060. */
  1061. static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
  1062. unsigned int pidx, unsigned int gen,
  1063. struct cmdQ *q)
  1064. {
  1065. dma_addr_t mapping, desc_mapping;
  1066. struct cmdQ_e *e, *e1;
  1067. struct cmdQ_ce *ce;
  1068. unsigned int i, flags, first_desc_len, desc_len,
  1069. nfrags = skb_shinfo(skb)->nr_frags;
  1070. e = e1 = &q->entries[pidx];
  1071. ce = &q->centries[pidx];
  1072. mapping = pci_map_single(adapter->pdev, skb->data,
  1073. skb_headlen(skb), PCI_DMA_TODEVICE);
  1074. desc_mapping = mapping;
  1075. desc_len = skb_headlen(skb);
  1076. flags = F_CMD_DATAVALID | F_CMD_SOP |
  1077. V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
  1078. V_CMD_GEN2(gen);
  1079. first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
  1080. desc_len : SGE_TX_DESC_MAX_PLEN;
  1081. e->addr_lo = (u32)desc_mapping;
  1082. e->addr_hi = (u64)desc_mapping >> 32;
  1083. e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
  1084. ce->skb = NULL;
  1085. dma_unmap_len_set(ce, dma_len, 0);
  1086. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
  1087. desc_len > SGE_TX_DESC_MAX_PLEN) {
  1088. desc_mapping += first_desc_len;
  1089. desc_len -= first_desc_len;
  1090. e1++;
  1091. ce++;
  1092. if (++pidx == q->size) {
  1093. pidx = 0;
  1094. gen ^= 1;
  1095. e1 = q->entries;
  1096. ce = q->centries;
  1097. }
  1098. pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
  1099. &desc_mapping, &desc_len,
  1100. nfrags, q);
  1101. if (likely(desc_len))
  1102. write_tx_desc(e1, desc_mapping, desc_len, gen,
  1103. nfrags == 0);
  1104. }
  1105. ce->skb = NULL;
  1106. dma_unmap_addr_set(ce, dma_addr, mapping);
  1107. dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
  1108. for (i = 0; nfrags--; i++) {
  1109. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1110. e1++;
  1111. ce++;
  1112. if (++pidx == q->size) {
  1113. pidx = 0;
  1114. gen ^= 1;
  1115. e1 = q->entries;
  1116. ce = q->centries;
  1117. }
  1118. mapping = pci_map_page(adapter->pdev, frag->page,
  1119. frag->page_offset, frag->size,
  1120. PCI_DMA_TODEVICE);
  1121. desc_mapping = mapping;
  1122. desc_len = frag->size;
  1123. pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
  1124. &desc_mapping, &desc_len,
  1125. nfrags, q);
  1126. if (likely(desc_len))
  1127. write_tx_desc(e1, desc_mapping, desc_len, gen,
  1128. nfrags == 0);
  1129. ce->skb = NULL;
  1130. dma_unmap_addr_set(ce, dma_addr, mapping);
  1131. dma_unmap_len_set(ce, dma_len, frag->size);
  1132. }
  1133. ce->skb = skb;
  1134. wmb();
  1135. e->flags = flags;
  1136. }
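/*
 * Note on ordering in write_tx_descs(): every descriptor of the packet is
 * filled in first, then the wmb(), and only then are the SOP descriptor's
 * flags (which carry its GEN2 bit) stored. The hardware will not start on
 * the packet before the first descriptor is marked valid, by which point
 * the rest of the chain is already in place.
 */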
  1137. /*
  1138. * Clean up completed Tx buffers.
  1139. */
  1140. static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
  1141. {
  1142. unsigned int reclaim = q->processed - q->cleaned;
  1143. if (reclaim) {
  1144. pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
  1145. q->processed, q->cleaned);
  1146. free_cmdQ_buffers(sge, q, reclaim);
  1147. q->cleaned += reclaim;
  1148. }
  1149. }
  1150. /*
  1151. * Called from tasklet. Checks the scheduler for any
  1152. * pending skbs that can be sent.
  1153. */
  1154. static void restart_sched(unsigned long arg)
  1155. {
  1156. struct sge *sge = (struct sge *) arg;
  1157. struct adapter *adapter = sge->adapter;
  1158. struct cmdQ *q = &sge->cmdQ[0];
  1159. struct sk_buff *skb;
  1160. unsigned int credits, queued_skb = 0;
  1161. spin_lock(&q->lock);
  1162. reclaim_completed_tx(sge, q);
  1163. credits = q->size - q->in_use;
  1164. pr_debug("restart_sched credits=%d\n", credits);
  1165. while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
  1166. unsigned int genbit, pidx, count;
  1167. count = 1 + skb_shinfo(skb)->nr_frags;
  1168. count += compute_large_page_tx_descs(skb);
  1169. q->in_use += count;
  1170. genbit = q->genbit;
  1171. pidx = q->pidx;
  1172. q->pidx += count;
  1173. if (q->pidx >= q->size) {
  1174. q->pidx -= q->size;
  1175. q->genbit ^= 1;
  1176. }
  1177. write_tx_descs(adapter, skb, pidx, genbit, q);
  1178. credits = q->size - q->in_use;
  1179. queued_skb = 1;
  1180. }
  1181. if (queued_skb) {
  1182. clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  1183. if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
  1184. set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  1185. writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
  1186. }
  1187. }
  1188. spin_unlock(&q->lock);
  1189. }
  1190. /**
  1191. * sge_rx - process an ingress ethernet packet
  1192. * @sge: the sge structure
  1193. * @fl: the free list that contains the packet buffer
  1194. * @len: the packet length
  1195. *
  1196. * Process an ingress ethernet packet and deliver it to the stack.
  1197. */
  1198. static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
  1199. {
  1200. struct sk_buff *skb;
  1201. const struct cpl_rx_pkt *p;
  1202. struct adapter *adapter = sge->adapter;
  1203. struct sge_port_stats *st;
  1204. struct net_device *dev;
  1205. skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
  1206. if (unlikely(!skb)) {
  1207. sge->stats.rx_drops++;
  1208. return;
  1209. }
  1210. p = (const struct cpl_rx_pkt *) skb->data;
  1211. if (p->iff >= adapter->params.nports) {
  1212. kfree_skb(skb);
  1213. return;
  1214. }
  1215. __skb_pull(skb, sizeof(*p));
  1216. st = this_cpu_ptr(sge->port_stats[p->iff]);
  1217. dev = adapter->port[p->iff].dev;
  1218. skb->protocol = eth_type_trans(skb, dev);
  1219. if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
  1220. skb->protocol == htons(ETH_P_IP) &&
  1221. (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
  1222. ++st->rx_cso_good;
  1223. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1224. } else
  1225. skb_checksum_none_assert(skb);
  1226. if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
  1227. st->vlan_xtract++;
  1228. vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
  1229. ntohs(p->vlan));
  1230. } else
  1231. netif_receive_skb(skb);
  1232. }
  1233. /*
  1234. * Returns true if a command queue has enough available descriptors that
  1235. * we can resume Tx operation after temporarily disabling its packet queue.
  1236. */
  1237. static inline int enough_free_Tx_descs(const struct cmdQ *q)
  1238. {
  1239. unsigned int r = q->processed - q->cleaned;
  1240. return q->in_use - r < (q->size >> 1);
  1241. }
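/*
 * In other words: descriptors the hardware has processed but software has
 * not yet cleaned are counted as free here, so Tx resumes once fewer than
 * half of the ring's entries are still awaiting hardware processing.
 */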
  1242. /*
  1243. * Called when sufficient space has become available in the SGE command queues
  1244. * after the Tx packet schedulers have been suspended to restart the Tx path.
  1245. */
  1246. static void restart_tx_queues(struct sge *sge)
  1247. {
  1248. struct adapter *adap = sge->adapter;
  1249. int i;
  1250. if (!enough_free_Tx_descs(&sge->cmdQ[0]))
  1251. return;
  1252. for_each_port(adap, i) {
  1253. struct net_device *nd = adap->port[i].dev;
  1254. if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
  1255. netif_running(nd)) {
  1256. sge->stats.cmdQ_restarted[2]++;
  1257. netif_wake_queue(nd);
  1258. }
  1259. }
  1260. }
  1261. /*
  1262. * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
  1263. * information.
  1264. */
  1265. static unsigned int update_tx_info(struct adapter *adapter,
  1266. unsigned int flags,
  1267. unsigned int pr0)
  1268. {
  1269. struct sge *sge = adapter->sge;
  1270. struct cmdQ *cmdq = &sge->cmdQ[0];
  1271. cmdq->processed += pr0;
  1272. if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
  1273. freelQs_empty(sge);
  1274. flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
  1275. }
  1276. if (flags & F_CMDQ0_ENABLE) {
  1277. clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
  1278. if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
  1279. !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
  1280. set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
  1281. writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
  1282. }
  1283. if (sge->tx_sched)
  1284. tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
  1285. flags &= ~F_CMDQ0_ENABLE;
  1286. }
  1287. if (unlikely(sge->stopped_tx_queues != 0))
  1288. restart_tx_queues(sge);
  1289. return flags;
  1290. }
  1291. /*
  1292. * Process SGE responses, up to the supplied budget. Returns the number of
  1293. * responses processed. A negative budget is effectively unlimited.
  1294. */
  1295. static int process_responses(struct adapter *adapter, int budget)
  1296. {
  1297. struct sge *sge = adapter->sge;
  1298. struct respQ *q = &sge->respQ;
  1299. struct respQ_e *e = &q->entries[q->cidx];
  1300. int done = 0;
  1301. unsigned int flags = 0;
  1302. unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
  1303. while (done < budget && e->GenerationBit == q->genbit) {
  1304. flags |= e->Qsleeping;
  1305. cmdq_processed[0] += e->Cmdq0CreditReturn;
  1306. cmdq_processed[1] += e->Cmdq1CreditReturn;
  1307. /* We batch updates to the TX side to avoid cacheline
  1308. * ping-pong of TX state information on MP where the sender
  1309. * might run on a different CPU than this function...
  1310. */
  1311. if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
  1312. flags = update_tx_info(adapter, flags, cmdq_processed[0]);
  1313. cmdq_processed[0] = 0;
  1314. }
  1315. if (unlikely(cmdq_processed[1] > 16)) {
  1316. sge->cmdQ[1].processed += cmdq_processed[1];
  1317. cmdq_processed[1] = 0;
  1318. }
  1319. if (likely(e->DataValid)) {
  1320. struct freelQ *fl = &sge->freelQ[e->FreelistQid];
  1321. BUG_ON(!e->Sop || !e->Eop);
  1322. if (unlikely(e->Offload))
  1323. unexpected_offload(adapter, fl);
  1324. else
  1325. sge_rx(sge, fl, e->BufferLength);
  1326. ++done;
  1327. /*
  1328. * Note: this depends on each packet consuming a
  1329. * single free-list buffer; cf. the BUG above.
  1330. */
  1331. if (++fl->cidx == fl->size)
  1332. fl->cidx = 0;
  1333. prefetch(fl->centries[fl->cidx].skb);
  1334. if (unlikely(--fl->credits <
  1335. fl->size - SGE_FREEL_REFILL_THRESH))
  1336. refill_free_list(sge, fl);
  1337. } else
  1338. sge->stats.pure_rsps++;
  1339. e++;
  1340. if (unlikely(++q->cidx == q->size)) {
  1341. q->cidx = 0;
  1342. q->genbit ^= 1;
  1343. e = q->entries;
  1344. }
  1345. prefetch(e);
  1346. if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
  1347. writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
  1348. q->credits = 0;
  1349. }
  1350. }
  1351. flags = update_tx_info(adapter, flags, cmdq_processed[0]);
  1352. sge->cmdQ[1].processed += cmdq_processed[1];
  1353. return done;
  1354. }
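/*
 * Response-queue credits are returned to the SGE in batches: the credit
 * register is written only after more than SGE_RESPQ_REPLENISH_THRES
 * (SGE_RESPQ_E_N / 4 = 256) entries have been consumed, keeping the MMIO
 * write rate low on busy receive paths.
 */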
  1355. static inline int responses_pending(const struct adapter *adapter)
  1356. {
  1357. const struct respQ *Q = &adapter->sge->respQ;
  1358. const struct respQ_e *e = &Q->entries[Q->cidx];
  1359. return e->GenerationBit == Q->genbit;
  1360. }
  1361. /*
  1362. * A simpler version of process_responses() that handles only pure (i.e.,
  1363. * non data-carrying) responses. Such responses are too light-weight to justify
  1364. * calling a softirq when using NAPI, so we handle them specially in hard
  1365. * interrupt context. The function is called with a pointer to a response,
  1366. * which the caller must ensure is a valid pure response. Returns 1 if it
  1367. * encounters a valid data-carrying response, 0 otherwise.
  1368. */
  1369. static int process_pure_responses(struct adapter *adapter)
  1370. {
  1371. struct sge *sge = adapter->sge;
  1372. struct respQ *q = &sge->respQ;
  1373. struct respQ_e *e = &q->entries[q->cidx];
  1374. const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
  1375. unsigned int flags = 0;
  1376. unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
  1377. prefetch(fl->centries[fl->cidx].skb);
  1378. if (e->DataValid)
  1379. return 1;
  1380. do {
  1381. flags |= e->Qsleeping;
  1382. cmdq_processed[0] += e->Cmdq0CreditReturn;
  1383. cmdq_processed[1] += e->Cmdq1CreditReturn;
  1384. e++;
  1385. if (unlikely(++q->cidx == q->size)) {
  1386. q->cidx = 0;
  1387. q->genbit ^= 1;
  1388. e = q->entries;
  1389. }
  1390. prefetch(e);
  1391. if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
  1392. writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
  1393. q->credits = 0;
  1394. }
  1395. sge->stats.pure_rsps++;
  1396. } while (e->GenerationBit == q->genbit && !e->DataValid);
  1397. flags = update_tx_info(adapter, flags, cmdq_processed[0]);
  1398. sge->cmdQ[1].processed += cmdq_processed[1];
  1399. return e->GenerationBit == q->genbit;
  1400. }
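
/*
 * Return-value note for process_pure_responses(): 1 means the walk
 * stopped at a valid data-carrying entry (the caller should let NAPI take
 * over), 0 means every pending pure response was drained and the queue is
 * now empty.
 */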

/*
 * Handler for new data events when using NAPI.  This does not need any
 * locking or protection from interrupts as data interrupts are off at
 * this point and other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}
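
/*
 * The write of respQ.cidx to A_SG_SLEEPING above appears to tell the
 * hardware how far the driver has consumed the response queue, so a new
 * data interrupt is raised only once fresh responses arrive past that
 * point; t1_interrupt() below issues the same write when it drains pure
 * responses without scheduling NAPI.
 */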

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}
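
/*
 * The napi_schedule_prep()/__napi_schedule() split above lets the
 * hard-irq path claim NAPI, drain pure responses in place, and only
 * schedule the softirq poll if a data-carrying response was actually
 * found; otherwise napi_enable() undoes the claim, as the inline comments
 * note.
 */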

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has the hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed.  Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack.  In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep.  There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
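
/*
 * Design note: q->lock is held only while descriptor slots are claimed
 * (in_use/pidx/genbit); write_tx_descs(), which does the potentially
 * expensive DMA mapping and descriptor writes, runs outside the lock on
 * the pidx/genbit snapshot taken on the stack.  That is what the
 * reentrancy remark in the comment above the function refers to.
 */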

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
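
/* Worked example (operand values hypothetical): MK_ETH_TYPE_MSS(2, 1460)
 * yields (1460 & 0x3FFF) | (2 << 14), i.e. the MSS in bits 13:0 and the
 * Ethernet framing type in the bits above it.
 */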

/*
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right; drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* We assume this catches the gratuitous ARP; we will reuse
		 * it later to flush out stuck ESPI packets.
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later.  We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
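
/*
 * Example (clock rate hypothetical): with a 125 MHz core clock,
 * core_ticks_per_usec() is 125, so rx_coalesce_usecs = 50 programs
 * 50 * 125 = 6250 core ticks into A_SG_INTRTIMER.
 */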

/*
 * Allocates both RX and TX resources and configures the SGE.  However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
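
/*
 * Note that t1_sge_stop() and t1_sge_start() mirror each other: stop
 * quiesces DMA (with a read-back to flush the posted write) before
 * tearing down the timers, while start refills both free lists before
 * re-enabling DMA so the hardware never wakes up to an empty free list.
 */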

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
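
/*
 * Both ESPI workarounds above follow the same recipe: when the ESPI
 * monitor indicates a stuck packet, the cached (gratuitous ARP) skb is
 * stamped with ch_mac_addr at two fixed offsets, marked via skb->cb[0]
 * so the stamping happens only once, reference-counted with skb_get() so
 * TX reclaim cannot free it, and retransmitted through t1_sge_tx() to
 * nudge the ESPI block.
 */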

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;

		sge->espibug_timer.data = (unsigned long)sge->adapter;
		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}
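
/*
 * Unwind note: when alloc_percpu() fails for port i, the nomem_port loop
 * starts freeing at that same index; sge->port_stats[i] is NULL there,
 * which is fine since free_percpu() ignores a NULL pointer.
 */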