
  1. /*****************************************************************************
  2. * *
  3. * File: sge.c *
  4. * $Revision: 1.26 $ *
  5. * $Date: 2005/06/21 18:29:48 $ *
  6. * Description: *
  7. * DMA engine. *
  8. * part of the Chelsio 10Gb Ethernet Driver. *
  9. * *
  10. * This program is free software; you can redistribute it and/or modify *
  11. * it under the terms of the GNU General Public License, version 2, as *
  12. * published by the Free Software Foundation. *
  13. * *
  14. * You should have received a copy of the GNU General Public License along *
  15. * with this program; if not, write to the Free Software Foundation, Inc., *
  16. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  17. * *
  18. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
  19. * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
  20. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
  21. * *
  22. * http://www.chelsio.com *
  23. * *
  24. * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
  25. * All rights reserved. *
  26. * *
  27. * Maintainers: maintainers@chelsio.com *
  28. * *
  29. * Authors: Dimitrios Michailidis <dm@chelsio.com> *
  30. * Tina Yang <tainay@chelsio.com> *
  31. * Felix Marti <felix@chelsio.com> *
  32. * Scott Bardone <sbardone@chelsio.com> *
  33. * Kurt Ottaway <kottaway@chelsio.com> *
  34. * Frank DiMambro <frank@chelsio.com> *
  35. * *
  36. * History: *
  37. * *
  38. ****************************************************************************/
  39. #include "common.h"
  40. #include <linux/types.h>
  41. #include <linux/errno.h>
  42. #include <linux/pci.h>
  43. #include <linux/ktime.h>
  44. #include <linux/netdevice.h>
  45. #include <linux/etherdevice.h>
  46. #include <linux/if_vlan.h>
  47. #include <linux/skbuff.h>
  48. #include <linux/init.h>
  49. #include <linux/mm.h>
  50. #include <linux/tcp.h>
  51. #include <linux/ip.h>
  52. #include <linux/in.h>
  53. #include <linux/if_arp.h>
  54. #include <linux/slab.h>
  55. #include <linux/prefetch.h>
  56. #include "cpl5_cmd.h"
  57. #include "sge.h"
  58. #include "regs.h"
  59. #include "espi.h"
  60. /* This belongs in if_ether.h */
  61. #define ETH_P_CPL5 0xf
  62. #define SGE_CMDQ_N 2
  63. #define SGE_FREELQ_N 2
  64. #define SGE_CMDQ0_E_N 1024
  65. #define SGE_CMDQ1_E_N 128
  66. #define SGE_FREEL_SIZE 4096
  67. #define SGE_JUMBO_FREEL_SIZE 512
  68. #define SGE_FREEL_REFILL_THRESH 16
  69. #define SGE_RESPQ_E_N 1024
  70. #define SGE_INTRTIMER_NRES 1000
  71. #define SGE_RX_SM_BUF_SIZE 1536
  72. #define SGE_TX_DESC_MAX_PLEN 16384
  73. #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
  74. /*
  75. * Period of the TX buffer reclaim timer. This timer does not need to run
  76. * frequently as TX buffers are usually reclaimed by new TX packets.
  77. */
  78. #define TX_RECLAIM_PERIOD (HZ / 4)
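/* At HZ / 4 the reclaim timer fires roughly every 250 ms. */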
  79. #define M_CMD_LEN 0x7fffffff
  80. #define V_CMD_LEN(v) (v)
  81. #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
  82. #define V_CMD_GEN1(v) ((v) << 31)
  83. #define V_CMD_GEN2(v) (v)
  84. #define F_CMD_DATAVALID (1 << 1)
  85. #define F_CMD_SOP (1 << 2)
  86. #define V_CMD_EOP(v) ((v) << 3)
  87. /*
  88. * Command queue, receive buffer list, and response queue descriptors.
  89. */
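/*
 * Note on the generation bits: each descriptor records the generation of the
 * ring at the time it was written, and SW flips a ring's genbit whenever its
 * index wraps.  An entry is therefore treated as valid only while its stored
 * generation matches the ring's current genbit; the response queue, for
 * example, is polled with
 *
 *	e->GenerationBit == q->genbit
 *
 * in responses_pending() and process_responses() below.
 */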
  90. #if defined(__BIG_ENDIAN_BITFIELD)
  91. struct cmdQ_e {
  92. u32 addr_lo;
  93. u32 len_gen;
  94. u32 flags;
  95. u32 addr_hi;
  96. };
  97. struct freelQ_e {
  98. u32 addr_lo;
  99. u32 len_gen;
  100. u32 gen2;
  101. u32 addr_hi;
  102. };
  103. struct respQ_e {
  104. u32 Qsleeping : 4;
  105. u32 Cmdq1CreditReturn : 5;
  106. u32 Cmdq1DmaComplete : 5;
  107. u32 Cmdq0CreditReturn : 5;
  108. u32 Cmdq0DmaComplete : 5;
  109. u32 FreelistQid : 2;
  110. u32 CreditValid : 1;
  111. u32 DataValid : 1;
  112. u32 Offload : 1;
  113. u32 Eop : 1;
  114. u32 Sop : 1;
  115. u32 GenerationBit : 1;
  116. u32 BufferLength;
  117. };
  118. #elif defined(__LITTLE_ENDIAN_BITFIELD)
  119. struct cmdQ_e {
  120. u32 len_gen;
  121. u32 addr_lo;
  122. u32 addr_hi;
  123. u32 flags;
  124. };
  125. struct freelQ_e {
  126. u32 len_gen;
  127. u32 addr_lo;
  128. u32 addr_hi;
  129. u32 gen2;
  130. };
  131. struct respQ_e {
  132. u32 BufferLength;
  133. u32 GenerationBit : 1;
  134. u32 Sop : 1;
  135. u32 Eop : 1;
  136. u32 Offload : 1;
  137. u32 DataValid : 1;
  138. u32 CreditValid : 1;
  139. u32 FreelistQid : 2;
  140. u32 Cmdq0DmaComplete : 5;
  141. u32 Cmdq0CreditReturn : 5;
  142. u32 Cmdq1DmaComplete : 5;
  143. u32 Cmdq1CreditReturn : 5;
  144. u32 Qsleeping : 4;
  145. };
  146. #endif
  147. /*
  148. * SW Context Command and Freelist Queue Descriptors
  149. */
  150. struct cmdQ_ce {
  151. struct sk_buff *skb;
  152. DEFINE_DMA_UNMAP_ADDR(dma_addr);
  153. DEFINE_DMA_UNMAP_LEN(dma_len);
  154. };
  155. struct freelQ_ce {
  156. struct sk_buff *skb;
  157. DEFINE_DMA_UNMAP_ADDR(dma_addr);
  158. DEFINE_DMA_UNMAP_LEN(dma_len);
  159. };
  160. /*
  161. * SW command, freelist and response rings
  162. */
  163. struct cmdQ {
  164. unsigned long status; /* HW DMA fetch status */
  165. unsigned int in_use; /* # of in-use command descriptors */
  166. unsigned int size; /* # of descriptors */
  167. unsigned int processed; /* total # of descs HW has processed */
  168. unsigned int cleaned; /* total # of descs SW has reclaimed */
  169. unsigned int stop_thres; /* SW TX queue suspend threshold */
  170. u16 pidx; /* producer index (SW) */
  171. u16 cidx; /* consumer index (HW) */
  172. u8 genbit; /* current generation (=valid) bit */
  173. u8 sop; /* is next entry start of packet? */
  174. struct cmdQ_e *entries; /* HW command descriptor Q */
  175. struct cmdQ_ce *centries; /* SW command context descriptor Q */
  176. dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
  177. spinlock_t lock; /* Lock to protect cmdQ enqueuing */
  178. };
  179. struct freelQ {
  180. unsigned int credits; /* # of available RX buffers */
  181. unsigned int size; /* free list capacity */
  182. u16 pidx; /* producer index (SW) */
  183. u16 cidx; /* consumer index (HW) */
  184. u16 rx_buffer_size; /* Buffer size on this free list */
  185. u16 dma_offset; /* DMA offset to align IP headers */
  186. u16 recycleq_idx; /* skb recycle q to use */
  187. u8 genbit; /* current generation (=valid) bit */
  188. struct freelQ_e *entries; /* HW freelist descriptor Q */
  189. struct freelQ_ce *centries; /* SW freelist context descriptor Q */
  190. dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
  191. };
  192. struct respQ {
  193. unsigned int credits; /* credits to be returned to SGE */
  194. unsigned int size; /* # of response Q descriptors */
  195. u16 cidx; /* consumer index (SW) */
  196. u8 genbit; /* current generation(=valid) bit */
  197. struct respQ_e *entries; /* HW response descriptor Q */
  198. dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
  199. };
  200. /* Bit flags for cmdQ.status */
  201. enum {
  202. CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
  203. CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
  204. };
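/*
 * These two bits coordinate doorbell rings between the TX path and the
 * response-processing path.  CMDQ_STAT_RUNNING means the doorbell has been
 * rung and the fetch engine is presumed busy; CMDQ_STAT_LAST_PKT_DB records
 * whether the most recently queued work has already been announced to the HW.
 * See the test_and_set_bit()/clear_bit() sequences in sched_skb(),
 * restart_sched() and update_tx_info(), which avoid redundant doorbells
 * without losing a restart when new work arrives as the engine goes idle.
 */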
  205. /* T204 TX SW scheduler */
  206. /* Per T204 TX port */
  207. struct sched_port {
  208. unsigned int avail; /* available bits - quota */
  209. unsigned int drain_bits_per_1024ns; /* drain rate */
  210. unsigned int speed; /* drain rate, mbps */
  211. unsigned int mtu; /* mtu size */
  212. struct sk_buff_head skbq; /* pending skbs */
  213. };
  214. /* Per T204 device */
  215. struct sched {
  216. ktime_t last_updated; /* last time quotas were computed */
  217. unsigned int max_avail; /* max bits to be sent to any port */
  218. unsigned int port; /* port index (round robin ports) */
  219. unsigned int num; /* num skbs in per port queues */
  220. struct sched_port p[MAX_NPORTS];
  221. struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
  222. };
  223. static void restart_sched(unsigned long);
  224. /*
  225. * Main SGE data structure
  226. *
  227. * Interrupts are handled by a single CPU and it is likely that on an MP system
  228. * the application is migrated to another CPU. In that scenario, we try to
  229. * separate the RX (in irq context) and TX state in order to decrease memory
  230. * contention.
  231. */
  232. struct sge {
  233. struct adapter *adapter; /* adapter backpointer */
  234. struct net_device *netdev; /* netdevice backpointer */
  235. struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
  236. struct respQ respQ; /* response Q */
  237. unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
  238. unsigned int rx_pkt_pad; /* RX padding for L2 packets */
  239. unsigned int jumbo_fl; /* jumbo freelist Q index */
  240. unsigned int intrtimer_nres; /* no-resource interrupt timer */
  241. unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
  242. struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
  243. struct timer_list espibug_timer;
  244. unsigned long espibug_timeout;
  245. struct sk_buff *espibug_skb[MAX_NPORTS];
  246. u32 sge_control; /* shadow value of sge control reg */
  247. struct sge_intr_counts stats;
  248. struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
  249. struct sched *tx_sched;
  250. struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
  251. };
  252. static const u8 ch_mac_addr[ETH_ALEN] = {
  253. 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
  254. };
  255. /*
  256. * stop tasklet and free all pending skb's
  257. */
  258. static void tx_sched_stop(struct sge *sge)
  259. {
  260. struct sched *s = sge->tx_sched;
  261. int i;
  262. tasklet_kill(&s->sched_tsk);
  263. for (i = 0; i < MAX_NPORTS; i++)
  264. __skb_queue_purge(&s->p[i].skbq);
  265. }
  266. /*
  267. * t1_sched_update_parms() is called when the MTU or link speed changes. It
  268. * re-computes scheduler parameters to cope with the change.
  269. */
  270. unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
  271. unsigned int mtu, unsigned int speed)
  272. {
  273. struct sched *s = sge->tx_sched;
  274. struct sched_port *p = &s->p[port];
  275. unsigned int max_avail_segs;
  276. pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
  277. if (speed)
  278. p->speed = speed;
  279. if (mtu)
  280. p->mtu = mtu;
  281. if (speed || mtu) {
  282. unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
  283. do_div(drain, (p->mtu + 50) * 1000);
  284. p->drain_bits_per_1024ns = (unsigned int) drain;
  285. if (p->speed < 1000)
  286. p->drain_bits_per_1024ns =
  287. 90 * p->drain_bits_per_1024ns / 100;
  288. }
  289. if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
  290. p->drain_bits_per_1024ns -= 16;
  291. s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
  292. max_avail_segs = max(1U, 4096 / (p->mtu - 40));
  293. } else {
  294. s->max_avail = 16384;
  295. max_avail_segs = max(1U, 9000 / (p->mtu - 40));
  296. }
  297. pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
  298. "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
  299. p->speed, s->max_avail, max_avail_segs,
  300. p->drain_bits_per_1024ns);
  301. return max_avail_segs * (p->mtu - 40);
  302. }
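/*
 * Worked example of the drain-rate computation above (for illustration only):
 * with mtu = 1500 and speed = 1000 Mbps,
 *
 *	drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) = 964
 *
 * so roughly 964 bits of quota are replenished per 1024 ns, i.e. the port's
 * line rate derated by the (mtu - 40) / (mtu + 50) payload-to-wire ratio.
 * Ports slower than 1 Gbps are additionally scaled back to 90% of this value.
 */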
  303. #if 0
  304. /*
  305. * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
  306. * data that can be pushed per port.
  307. */
  308. void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
  309. {
  310. struct sched *s = sge->tx_sched;
  311. unsigned int i;
  312. s->max_avail = val;
  313. for (i = 0; i < MAX_NPORTS; i++)
  314. t1_sched_update_parms(sge, i, 0, 0);
  315. }
  316. /*
  317. * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
  318. * is draining.
  319. */
  320. void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
  321. unsigned int val)
  322. {
  323. struct sched *s = sge->tx_sched;
  324. struct sched_port *p = &s->p[port];
  325. p->drain_bits_per_1024ns = val * 1024 / 1000;
  326. t1_sched_update_parms(sge, port, 0, 0);
  327. }
  328. #endif /* 0 */
  329. /*
  330. * get_clock() implements a ns clock (see ktime_get)
  331. */
  332. static inline ktime_t get_clock(void)
  333. {
  334. struct timespec ts;
  335. ktime_get_ts(&ts);
  336. return timespec_to_ktime(ts);
  337. }
  338. /*
  339. * tx_sched_init() allocates resources and does basic initialization.
  340. */
  341. static int tx_sched_init(struct sge *sge)
  342. {
  343. struct sched *s;
  344. int i;
  345. s = kzalloc(sizeof (struct sched), GFP_KERNEL);
  346. if (!s)
  347. return -ENOMEM;
  348. pr_debug("tx_sched_init\n");
  349. tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
  350. sge->tx_sched = s;
  351. for (i = 0; i < MAX_NPORTS; i++) {
  352. skb_queue_head_init(&s->p[i].skbq);
  353. t1_sched_update_parms(sge, i, 1500, 1000);
  354. }
  355. return 0;
  356. }
  357. /*
  358. * sched_update_avail() computes the delta since the last time it was called
  359. * and updates the per port quota (number of bits that can be sent to any
  360. * port).
  361. */
  362. static inline int sched_update_avail(struct sge *sge)
  363. {
  364. struct sched *s = sge->tx_sched;
  365. ktime_t now = get_clock();
  366. unsigned int i;
  367. long long delta_time_ns;
  368. delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
  369. pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
  370. if (delta_time_ns < 15000)
  371. return 0;
  372. for (i = 0; i < MAX_NPORTS; i++) {
  373. struct sched_port *p = &s->p[i];
  374. unsigned int delta_avail;
  375. delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
  376. p->avail = min(p->avail + delta_avail, s->max_avail);
  377. }
  378. s->last_updated = now;
  379. return 1;
  380. }
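/*
 * The shift by 13 above combines two conversions: a divide by 1024 turns
 * "bits per 1024 ns" times "ns" into bits, and the extra divide by 8 turns
 * bits into bytes, which is the unit p->avail is effectively compared
 * against (sched_skb() checks it against skb->len).
 */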
  381. /*
  382. * sched_skb() is called from two different places. In the tx path, any
  383. * packet generating load on an output port will call sched_skb()
  384. * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
  385. * context (skb == NULL).
  386. * The scheduler only returns a skb (which will then be sent) if the
  387. * length of the skb is <= the current quota of the output port.
  388. */
  389. static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
  390. unsigned int credits)
  391. {
  392. struct sched *s = sge->tx_sched;
  393. struct sk_buff_head *skbq;
  394. unsigned int i, len, update = 1;
  395. pr_debug("sched_skb %p\n", skb);
  396. if (!skb) {
  397. if (!s->num)
  398. return NULL;
  399. } else {
  400. skbq = &s->p[skb->dev->if_port].skbq;
  401. __skb_queue_tail(skbq, skb);
  402. s->num++;
  403. skb = NULL;
  404. }
  405. if (credits < MAX_SKB_FRAGS + 1)
  406. goto out;
  407. again:
  408. for (i = 0; i < MAX_NPORTS; i++) {
  409. s->port = (s->port + 1) & (MAX_NPORTS - 1);
  410. skbq = &s->p[s->port].skbq;
  411. skb = skb_peek(skbq);
  412. if (!skb)
  413. continue;
  414. len = skb->len;
  415. if (len <= s->p[s->port].avail) {
  416. s->p[s->port].avail -= len;
  417. s->num--;
  418. __skb_unlink(skb, skbq);
  419. goto out;
  420. }
  421. skb = NULL;
  422. }
  423. if (update-- && sched_update_avail(sge))
  424. goto again;
  425. out:
  426. /* If there are more pending skbs, we use the hardware to schedule us
  427. * again.
  428. */
  429. if (s->num && !skb) {
  430. struct cmdQ *q = &sge->cmdQ[0];
  431. clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  432. if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
  433. set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  434. writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
  435. }
  436. }
  437. pr_debug("sched_skb ret %p\n", skb);
  438. return skb;
  439. }
  440. /*
  441. * PIO to indicate that memory mapped Q contains valid descriptor(s).
  442. */
  443. static inline void doorbell_pio(struct adapter *adapter, u32 val)
  444. {
  445. wmb();
  446. writel(val, adapter->regs + A_SG_DOORBELL);
  447. }
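/*
 * The wmb() above orders all preceding descriptor writes ahead of the
 * doorbell write, so the SGE never fetches a descriptor the CPU has not
 * finished writing.
 */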
  448. /*
  449. * Frees all RX buffers on the freelist Q. The caller must make sure that
  450. * the SGE is turned off before calling this function.
  451. */
  452. static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
  453. {
  454. unsigned int cidx = q->cidx;
  455. while (q->credits--) {
  456. struct freelQ_ce *ce = &q->centries[cidx];
  457. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  458. dma_unmap_len(ce, dma_len),
  459. PCI_DMA_FROMDEVICE);
  460. dev_kfree_skb(ce->skb);
  461. ce->skb = NULL;
  462. if (++cidx == q->size)
  463. cidx = 0;
  464. }
  465. }
  466. /*
  467. * Free RX free list and response queue resources.
  468. */
  469. static void free_rx_resources(struct sge *sge)
  470. {
  471. struct pci_dev *pdev = sge->adapter->pdev;
  472. unsigned int size, i;
  473. if (sge->respQ.entries) {
  474. size = sizeof(struct respQ_e) * sge->respQ.size;
  475. pci_free_consistent(pdev, size, sge->respQ.entries,
  476. sge->respQ.dma_addr);
  477. }
  478. for (i = 0; i < SGE_FREELQ_N; i++) {
  479. struct freelQ *q = &sge->freelQ[i];
  480. if (q->centries) {
  481. free_freelQ_buffers(pdev, q);
  482. kfree(q->centries);
  483. }
  484. if (q->entries) {
  485. size = sizeof(struct freelQ_e) * q->size;
  486. pci_free_consistent(pdev, size, q->entries,
  487. q->dma_addr);
  488. }
  489. }
  490. }
  491. /*
  492. * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
  493. * response queue.
  494. */
  495. static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
  496. {
  497. struct pci_dev *pdev = sge->adapter->pdev;
  498. unsigned int size, i;
  499. for (i = 0; i < SGE_FREELQ_N; i++) {
  500. struct freelQ *q = &sge->freelQ[i];
  501. q->genbit = 1;
  502. q->size = p->freelQ_size[i];
  503. q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
  504. size = sizeof(struct freelQ_e) * q->size;
  505. q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
  506. if (!q->entries)
  507. goto err_no_mem;
  508. size = sizeof(struct freelQ_ce) * q->size;
  509. q->centries = kzalloc(size, GFP_KERNEL);
  510. if (!q->centries)
  511. goto err_no_mem;
  512. }
  513. /*
  514. * Calculate the buffer sizes for the two free lists. FL0 accommodates
  515. * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
  516. * including all the sk_buff overhead.
  517. *
  518. * Note: For T2 FL0 and FL1 are reversed.
  519. */
  520. sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
  521. sizeof(struct cpl_rx_data) +
  522. sge->freelQ[!sge->jumbo_fl].dma_offset;
  523. size = (16 * 1024) -
  524. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  525. sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
  526. /*
  527. * Setup which skb recycle Q should be used when recycling buffers from
  528. * each free list.
  529. */
  530. sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
  531. sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
  532. sge->respQ.genbit = 1;
  533. sge->respQ.size = SGE_RESPQ_E_N;
  534. sge->respQ.credits = 0;
  535. size = sizeof(struct respQ_e) * sge->respQ.size;
  536. sge->respQ.entries =
  537. pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
  538. if (!sge->respQ.entries)
  539. goto err_no_mem;
  540. return 0;
  541. err_no_mem:
  542. free_rx_resources(sge);
  543. return -ENOMEM;
  544. }
  545. /*
  546. * Reclaims n TX descriptors and frees the buffers associated with them.
  547. */
  548. static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
  549. {
  550. struct cmdQ_ce *ce;
  551. struct pci_dev *pdev = sge->adapter->pdev;
  552. unsigned int cidx = q->cidx;
  553. q->in_use -= n;
  554. ce = &q->centries[cidx];
  555. while (n--) {
  556. if (likely(dma_unmap_len(ce, dma_len))) {
  557. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  558. dma_unmap_len(ce, dma_len),
  559. PCI_DMA_TODEVICE);
  560. if (q->sop)
  561. q->sop = 0;
  562. }
  563. if (ce->skb) {
  564. dev_kfree_skb_any(ce->skb);
  565. q->sop = 1;
  566. }
  567. ce++;
  568. if (++cidx == q->size) {
  569. cidx = 0;
  570. ce = q->centries;
  571. }
  572. }
  573. q->cidx = cidx;
  574. }
  575. /*
  576. * Free TX resources.
  577. *
  578. * Assumes that SGE is stopped and all interrupts are disabled.
  579. */
  580. static void free_tx_resources(struct sge *sge)
  581. {
  582. struct pci_dev *pdev = sge->adapter->pdev;
  583. unsigned int size, i;
  584. for (i = 0; i < SGE_CMDQ_N; i++) {
  585. struct cmdQ *q = &sge->cmdQ[i];
  586. if (q->centries) {
  587. if (q->in_use)
  588. free_cmdQ_buffers(sge, q, q->in_use);
  589. kfree(q->centries);
  590. }
  591. if (q->entries) {
  592. size = sizeof(struct cmdQ_e) * q->size;
  593. pci_free_consistent(pdev, size, q->entries,
  594. q->dma_addr);
  595. }
  596. }
  597. }
  598. /*
  599. * Allocates basic TX resources, consisting of memory mapped command Qs.
  600. */
  601. static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
  602. {
  603. struct pci_dev *pdev = sge->adapter->pdev;
  604. unsigned int size, i;
  605. for (i = 0; i < SGE_CMDQ_N; i++) {
  606. struct cmdQ *q = &sge->cmdQ[i];
  607. q->genbit = 1;
  608. q->sop = 1;
  609. q->size = p->cmdQ_size[i];
  610. q->in_use = 0;
  611. q->status = 0;
  612. q->processed = q->cleaned = 0;
  613. q->stop_thres = 0;
  614. spin_lock_init(&q->lock);
  615. size = sizeof(struct cmdQ_e) * q->size;
  616. q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
  617. if (!q->entries)
  618. goto err_no_mem;
  619. size = sizeof(struct cmdQ_ce) * q->size;
  620. q->centries = kzalloc(size, GFP_KERNEL);
  621. if (!q->centries)
  622. goto err_no_mem;
  623. }
  624. /*
  625. * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
  626. * only. For queue 0 set the stop threshold so we can handle one more
  627. * packet from each port, plus reserve an additional 24 entries for
  628. * Ethernet packets only. Queue 1 never suspends nor do we reserve
  629. * space for Ethernet packets.
  630. */
  631. sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
  632. (MAX_SKB_FRAGS + 1);
  633. return 0;
  634. err_no_mem:
  635. free_tx_resources(sge);
  636. return -ENOMEM;
  637. }
  638. static inline void setup_ring_params(struct adapter *adapter, u64 addr,
  639. u32 size, int base_reg_lo,
  640. int base_reg_hi, int size_reg)
  641. {
  642. writel((u32)addr, adapter->regs + base_reg_lo);
  643. writel(addr >> 32, adapter->regs + base_reg_hi);
  644. writel(size, adapter->regs + size_reg);
  645. }
  646. /*
  647. * Enable/disable VLAN acceleration.
  648. */
  649. void t1_vlan_mode(struct adapter *adapter, u32 features)
  650. {
  651. struct sge *sge = adapter->sge;
  652. if (features & NETIF_F_HW_VLAN_RX)
  653. sge->sge_control |= F_VLAN_XTRACT;
  654. else
  655. sge->sge_control &= ~F_VLAN_XTRACT;
  656. if (adapter->open_device_map) {
  657. writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
  658. readl(adapter->regs + A_SG_CONTROL); /* flush */
  659. }
  660. }
  661. /*
  662. * Programs the various SGE registers. The engine is not yet enabled at this
  663. * point, but sge->sge_control is set up and ready to go.
  664. */
  665. static void configure_sge(struct sge *sge, struct sge_params *p)
  666. {
  667. struct adapter *ap = sge->adapter;
  668. writel(0, ap->regs + A_SG_CONTROL);
  669. setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
  670. A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
  671. setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
  672. A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
  673. setup_ring_params(ap, sge->freelQ[0].dma_addr,
  674. sge->freelQ[0].size, A_SG_FL0BASELWR,
  675. A_SG_FL0BASEUPR, A_SG_FL0SIZE);
  676. setup_ring_params(ap, sge->freelQ[1].dma_addr,
  677. sge->freelQ[1].size, A_SG_FL1BASELWR,
  678. A_SG_FL1BASEUPR, A_SG_FL1SIZE);
  679. /* The threshold comparison uses <. */
  680. writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
  681. setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
  682. A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
  683. writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
  684. sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
  685. F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
  686. V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
  687. V_RX_PKT_OFFSET(sge->rx_pkt_pad);
  688. #if defined(__BIG_ENDIAN_BITFIELD)
  689. sge->sge_control |= F_ENABLE_BIG_ENDIAN;
  690. #endif
  691. /* Initialize no-resource timer */
  692. sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
  693. t1_sge_set_coalesce_params(sge, p);
  694. }
  695. /*
  696. * Return the payload capacity of the jumbo free-list buffers.
  697. */
  698. static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
  699. {
  700. return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
  701. sge->freelQ[sge->jumbo_fl].dma_offset -
  702. sizeof(struct cpl_rx_data);
  703. }
  704. /*
  705. * Frees all SGE related resources and the sge structure itself
  706. */
  707. void t1_sge_destroy(struct sge *sge)
  708. {
  709. int i;
  710. for_each_port(sge->adapter, i)
  711. free_percpu(sge->port_stats[i]);
  712. kfree(sge->tx_sched);
  713. free_tx_resources(sge);
  714. free_rx_resources(sge);
  715. kfree(sge);
  716. }
  717. /*
  718. * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
  719. * context Q) until the Q is full or alloc_skb fails.
  720. *
  721. * It is possible that the generation bits already match, indicating that the
  722. * buffer is already valid and nothing needs to be done. This happens when we
  723. * copied a received buffer into a new sk_buff during the interrupt processing.
  724. *
  725. * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
  726. * we specify a RX_OFFSET in order to make sure that the IP header is 4B
  727. * aligned.
  728. */
  729. static void refill_free_list(struct sge *sge, struct freelQ *q)
  730. {
  731. struct pci_dev *pdev = sge->adapter->pdev;
  732. struct freelQ_ce *ce = &q->centries[q->pidx];
  733. struct freelQ_e *e = &q->entries[q->pidx];
  734. unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
  735. while (q->credits < q->size) {
  736. struct sk_buff *skb;
  737. dma_addr_t mapping;
  738. skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
  739. if (!skb)
  740. break;
  741. skb_reserve(skb, q->dma_offset);
  742. mapping = pci_map_single(pdev, skb->data, dma_len,
  743. PCI_DMA_FROMDEVICE);
  744. skb_reserve(skb, sge->rx_pkt_pad);
  745. ce->skb = skb;
  746. dma_unmap_addr_set(ce, dma_addr, mapping);
  747. dma_unmap_len_set(ce, dma_len, dma_len);
  748. e->addr_lo = (u32)mapping;
  749. e->addr_hi = (u64)mapping >> 32;
  750. e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
  751. wmb();
  752. e->gen2 = V_CMD_GEN2(q->genbit);
  753. e++;
  754. ce++;
  755. if (++q->pidx == q->size) {
  756. q->pidx = 0;
  757. q->genbit ^= 1;
  758. ce = q->centries;
  759. e = q->entries;
  760. }
  761. q->credits++;
  762. }
  763. }
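/*
 * Note the write ordering above: len_gen is written with the current
 * generation, then wmb(), and only then gen2.  The HW considers a freelist
 * entry valid once both generation fields match the ring's genbit, so this
 * ordering keeps it from consuming a descriptor whose address and length are
 * still in flight.
 */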
  764. /*
  765. * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
  766. * of both rings, we go into 'few interrupt mode' in order to give the system
  767. * time to free up resources.
  768. */
  769. static void freelQs_empty(struct sge *sge)
  770. {
  771. struct adapter *adapter = sge->adapter;
  772. u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
  773. u32 irqholdoff_reg;
  774. refill_free_list(sge, &sge->freelQ[0]);
  775. refill_free_list(sge, &sge->freelQ[1]);
  776. if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
  777. sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
  778. irq_reg |= F_FL_EXHAUSTED;
  779. irqholdoff_reg = sge->fixed_intrtimer;
  780. } else {
  781. /* Clear the F_FL_EXHAUSTED interrupts for now */
  782. irq_reg &= ~F_FL_EXHAUSTED;
  783. irqholdoff_reg = sge->intrtimer_nres;
  784. }
  785. writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
  786. writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
  787. /* We reenable the Qs to force a freelist GTS interrupt later */
  788. doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
  789. }
  790. #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
  791. #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
  792. #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
  793. F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
  794. /*
  795. * Disable SGE Interrupts
  796. */
  797. void t1_sge_intr_disable(struct sge *sge)
  798. {
  799. u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
  800. writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
  801. writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
  802. }
  803. /*
  804. * Enable SGE interrupts.
  805. */
  806. void t1_sge_intr_enable(struct sge *sge)
  807. {
  808. u32 en = SGE_INT_ENABLE;
  809. u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
  810. if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
  811. en &= ~F_PACKET_TOO_BIG;
  812. writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
  813. writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
  814. }
  815. /*
  816. * Clear SGE interrupts.
  817. */
  818. void t1_sge_intr_clear(struct sge *sge)
  819. {
  820. writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
  821. writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
  822. }
  823. /*
  824. * SGE 'Error' interrupt handler
  825. */
  826. int t1_sge_intr_error_handler(struct sge *sge)
  827. {
  828. struct adapter *adapter = sge->adapter;
  829. u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
  830. if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
  831. cause &= ~F_PACKET_TOO_BIG;
  832. if (cause & F_RESPQ_EXHAUSTED)
  833. sge->stats.respQ_empty++;
  834. if (cause & F_RESPQ_OVERFLOW) {
  835. sge->stats.respQ_overflow++;
  836. pr_alert("%s: SGE response queue overflow\n",
  837. adapter->name);
  838. }
  839. if (cause & F_FL_EXHAUSTED) {
  840. sge->stats.freelistQ_empty++;
  841. freelQs_empty(sge);
  842. }
  843. if (cause & F_PACKET_TOO_BIG) {
  844. sge->stats.pkt_too_big++;
  845. pr_alert("%s: SGE max packet size exceeded\n",
  846. adapter->name);
  847. }
  848. if (cause & F_PACKET_MISMATCH) {
  849. sge->stats.pkt_mismatch++;
  850. pr_alert("%s: SGE packet mismatch\n", adapter->name);
  851. }
  852. if (cause & SGE_INT_FATAL)
  853. t1_fatal_err(adapter);
  854. writel(cause, adapter->regs + A_SG_INT_CAUSE);
  855. return 0;
  856. }
  857. const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
  858. {
  859. return &sge->stats;
  860. }
  861. void t1_sge_get_port_stats(const struct sge *sge, int port,
  862. struct sge_port_stats *ss)
  863. {
  864. int cpu;
  865. memset(ss, 0, sizeof(*ss));
  866. for_each_possible_cpu(cpu) {
  867. struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
  868. ss->rx_cso_good += st->rx_cso_good;
  869. ss->tx_cso += st->tx_cso;
  870. ss->tx_tso += st->tx_tso;
  871. ss->tx_need_hdrroom += st->tx_need_hdrroom;
  872. ss->vlan_xtract += st->vlan_xtract;
  873. ss->vlan_insert += st->vlan_insert;
  874. }
  875. }
  876. /**
  877. * recycle_fl_buf - recycle a free list buffer
  878. * @fl: the free list
  879. * @idx: index of buffer to recycle
  880. *
  881. * Recycles the specified buffer on the given free list by adding it at
  882. * the next available slot on the list.
  883. */
  884. static void recycle_fl_buf(struct freelQ *fl, int idx)
  885. {
  886. struct freelQ_e *from = &fl->entries[idx];
  887. struct freelQ_e *to = &fl->entries[fl->pidx];
  888. fl->centries[fl->pidx] = fl->centries[idx];
  889. to->addr_lo = from->addr_lo;
  890. to->addr_hi = from->addr_hi;
  891. to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
  892. wmb();
  893. to->gen2 = V_CMD_GEN2(fl->genbit);
  894. fl->credits++;
  895. if (++fl->pidx == fl->size) {
  896. fl->pidx = 0;
  897. fl->genbit ^= 1;
  898. }
  899. }
  900. static int copybreak __read_mostly = 256;
  901. module_param(copybreak, int, 0);
  902. MODULE_PARM_DESC(copybreak, "Receive copy threshold");
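/*
 * get_packet() below copies packets shorter than 'copybreak' bytes into a
 * freshly allocated sk_buff and recycles the original receive buffer; larger
 * packets hand the original buffer up the stack.  The threshold can be set
 * at module load time, e.g. (assuming the standard cxgb module name):
 *
 *	modprobe cxgb copybreak=128
 */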
  903. /**
  904. * get_packet - return the next ingress packet buffer
  905. * @pdev: the PCI device that received the packet
  906. * @fl: the SGE free list holding the packet
  907. * @len: the actual packet length, excluding any SGE padding
  908. *
  909. * Get the next packet from a free list and complete setup of the
  910. * sk_buff. If the packet is small we make a copy and recycle the
  911. * original buffer, otherwise we use the original buffer itself. If a
  912. * positive drop threshold is supplied packets are dropped and their
  913. * buffers recycled if (a) the number of remaining buffers is under the
  914. * threshold and the packet is too big to copy, or (b) the packet should
  915. * be copied but there is no memory for the copy.
  916. */
  917. static inline struct sk_buff *get_packet(struct pci_dev *pdev,
  918. struct freelQ *fl, unsigned int len)
  919. {
  920. struct sk_buff *skb;
  921. const struct freelQ_ce *ce = &fl->centries[fl->cidx];
  922. if (len < copybreak) {
  923. skb = alloc_skb(len + 2, GFP_ATOMIC);
  924. if (!skb)
  925. goto use_orig_buf;
  926. skb_reserve(skb, 2); /* align IP header */
  927. skb_put(skb, len);
  928. pci_dma_sync_single_for_cpu(pdev,
  929. dma_unmap_addr(ce, dma_addr),
  930. dma_unmap_len(ce, dma_len),
  931. PCI_DMA_FROMDEVICE);
  932. skb_copy_from_linear_data(ce->skb, skb->data, len);
  933. pci_dma_sync_single_for_device(pdev,
  934. dma_unmap_addr(ce, dma_addr),
  935. dma_unmap_len(ce, dma_len),
  936. PCI_DMA_FROMDEVICE);
  937. recycle_fl_buf(fl, fl->cidx);
  938. return skb;
  939. }
  940. use_orig_buf:
  941. if (fl->credits < 2) {
  942. recycle_fl_buf(fl, fl->cidx);
  943. return NULL;
  944. }
  945. pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
  946. dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
  947. skb = ce->skb;
  948. prefetch(skb->data);
  949. skb_put(skb, len);
  950. return skb;
  951. }
  952. /**
  953. * unexpected_offload - handle an unexpected offload packet
  954. * @adapter: the adapter
  955. * @fl: the free list that received the packet
  956. *
  957. * Called when we receive an unexpected offload packet (e.g., the TOE
  958. * function is disabled or the card is a NIC). Prints a message and
  959. * recycles the buffer.
  960. */
  961. static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
  962. {
  963. struct freelQ_ce *ce = &fl->centries[fl->cidx];
  964. struct sk_buff *skb = ce->skb;
  965. pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
  966. dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
  967. pr_err("%s: unexpected offload packet, cmd %u\n",
  968. adapter->name, *skb->data);
  969. recycle_fl_buf(fl, fl->cidx);
  970. }
  971. /*
  972. * T1/T2 SGE limits the maximum DMA size per TX descriptor to
  973. * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
  974. * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
  975. * Note that the *_large_page_tx_descs stuff will be optimized out when
  976. * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
  977. *
  978. * compute_large_page_tx_descs() computes how many additional descriptors are
  979. * required to break down the stack's request.
  980. */
  981. static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
  982. {
  983. unsigned int count = 0;
  984. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
  985. unsigned int nfrags = skb_shinfo(skb)->nr_frags;
  986. unsigned int i, len = skb_headlen(skb);
  987. while (len > SGE_TX_DESC_MAX_PLEN) {
  988. count++;
  989. len -= SGE_TX_DESC_MAX_PLEN;
  990. }
  991. for (i = 0; nfrags--; i++) {
  992. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  993. len = frag->size;
  994. while (len > SGE_TX_DESC_MAX_PLEN) {
  995. count++;
  996. len -= SGE_TX_DESC_MAX_PLEN;
  997. }
  998. }
  999. }
  1000. return count;
  1001. }
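/*
 * Example (illustration only): with 64KB pages, a 60KB linear skb area needs
 * 3 extra descriptors on top of the one the regular path writes, since
 * 60KB = 3 * 16KB + 12KB and one descriptor carries at most
 * SGE_TX_DESC_MAX_PLEN (16KB).  With 4KB pages the condition above is
 * compile-time false and the helper is optimized away entirely.
 */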
  1002. /*
  1003. * Write a cmdQ entry.
  1004. *
  1005. * Since this function writes the 'flags' field, it must not be used to
  1006. * write the first cmdQ entry.
  1007. */
  1008. static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
  1009. unsigned int len, unsigned int gen,
  1010. unsigned int eop)
  1011. {
  1012. BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
  1013. e->addr_lo = (u32)mapping;
  1014. e->addr_hi = (u64)mapping >> 32;
  1015. e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
  1016. e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
  1017. }
  1018. /*
  1019. * See comment for previous function.
  1020. *
  1021. * write_large_page_tx_descs() writes additional SGE tx descriptors if
  1022. * *desc_len exceeds HW's capability.
  1023. */
  1024. static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
  1025. struct cmdQ_e **e,
  1026. struct cmdQ_ce **ce,
  1027. unsigned int *gen,
  1028. dma_addr_t *desc_mapping,
  1029. unsigned int *desc_len,
  1030. unsigned int nfrags,
  1031. struct cmdQ *q)
  1032. {
  1033. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
  1034. struct cmdQ_e *e1 = *e;
  1035. struct cmdQ_ce *ce1 = *ce;
  1036. while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
  1037. *desc_len -= SGE_TX_DESC_MAX_PLEN;
  1038. write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
  1039. *gen, nfrags == 0 && *desc_len == 0);
  1040. ce1->skb = NULL;
  1041. dma_unmap_len_set(ce1, dma_len, 0);
  1042. *desc_mapping += SGE_TX_DESC_MAX_PLEN;
  1043. if (*desc_len) {
  1044. ce1++;
  1045. e1++;
  1046. if (++pidx == q->size) {
  1047. pidx = 0;
  1048. *gen ^= 1;
  1049. ce1 = q->centries;
  1050. e1 = q->entries;
  1051. }
  1052. }
  1053. }
  1054. *e = e1;
  1055. *ce = ce1;
  1056. }
  1057. return pidx;
  1058. }
  1059. /*
  1060. * Write the command descriptors to transmit the given skb starting at
  1061. * descriptor pidx with the given generation.
  1062. */
  1063. static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
  1064. unsigned int pidx, unsigned int gen,
  1065. struct cmdQ *q)
  1066. {
  1067. dma_addr_t mapping, desc_mapping;
  1068. struct cmdQ_e *e, *e1;
  1069. struct cmdQ_ce *ce;
  1070. unsigned int i, flags, first_desc_len, desc_len,
  1071. nfrags = skb_shinfo(skb)->nr_frags;
  1072. e = e1 = &q->entries[pidx];
  1073. ce = &q->centries[pidx];
  1074. mapping = pci_map_single(adapter->pdev, skb->data,
  1075. skb_headlen(skb), PCI_DMA_TODEVICE);
  1076. desc_mapping = mapping;
  1077. desc_len = skb_headlen(skb);
  1078. flags = F_CMD_DATAVALID | F_CMD_SOP |
  1079. V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
  1080. V_CMD_GEN2(gen);
  1081. first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
  1082. desc_len : SGE_TX_DESC_MAX_PLEN;
  1083. e->addr_lo = (u32)desc_mapping;
  1084. e->addr_hi = (u64)desc_mapping >> 32;
  1085. e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
  1086. ce->skb = NULL;
  1087. dma_unmap_len_set(ce, dma_len, 0);
  1088. if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
  1089. desc_len > SGE_TX_DESC_MAX_PLEN) {
  1090. desc_mapping += first_desc_len;
  1091. desc_len -= first_desc_len;
  1092. e1++;
  1093. ce++;
  1094. if (++pidx == q->size) {
  1095. pidx = 0;
  1096. gen ^= 1;
  1097. e1 = q->entries;
  1098. ce = q->centries;
  1099. }
  1100. pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
  1101. &desc_mapping, &desc_len,
  1102. nfrags, q);
  1103. if (likely(desc_len))
  1104. write_tx_desc(e1, desc_mapping, desc_len, gen,
  1105. nfrags == 0);
  1106. }
  1107. ce->skb = NULL;
  1108. dma_unmap_addr_set(ce, dma_addr, mapping);
  1109. dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
  1110. for (i = 0; nfrags--; i++) {
  1111. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1112. e1++;
  1113. ce++;
  1114. if (++pidx == q->size) {
  1115. pidx = 0;
  1116. gen ^= 1;
  1117. e1 = q->entries;
  1118. ce = q->centries;
  1119. }
  1120. mapping = pci_map_page(adapter->pdev, frag->page,
  1121. frag->page_offset, frag->size,
  1122. PCI_DMA_TODEVICE);
  1123. desc_mapping = mapping;
  1124. desc_len = frag->size;
  1125. pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
  1126. &desc_mapping, &desc_len,
  1127. nfrags, q);
  1128. if (likely(desc_len))
  1129. write_tx_desc(e1, desc_mapping, desc_len, gen,
  1130. nfrags == 0);
  1131. ce->skb = NULL;
  1132. dma_unmap_addr_set(ce, dma_addr, mapping);
  1133. dma_unmap_len_set(ce, dma_len, frag->size);
  1134. }
  1135. ce->skb = skb;
  1136. wmb();
  1137. e->flags = flags;
  1138. }
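/*
 * write_tx_descs() fills in the first descriptor's 'flags' word last, after
 * the wmb(): the SOP descriptor only becomes valid to the HW once its flags
 * (F_CMD_DATAVALID plus the generation) land in memory, which guarantees that
 * every following descriptor of the packet is complete before the fetch
 * engine can start on any of them.  This is also why write_tx_desc() above
 * must never be used for the first entry of a packet.
 */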
  1139. /*
  1140. * Clean up completed Tx buffers.
  1141. */
  1142. static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
  1143. {
  1144. unsigned int reclaim = q->processed - q->cleaned;
  1145. if (reclaim) {
  1146. pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
  1147. q->processed, q->cleaned);
  1148. free_cmdQ_buffers(sge, q, reclaim);
  1149. q->cleaned += reclaim;
  1150. }
  1151. }
  1152. /*
  1153. * Called from tasklet. Checks the scheduler for any
  1154. * pending skbs that can be sent.
  1155. */
  1156. static void restart_sched(unsigned long arg)
  1157. {
  1158. struct sge *sge = (struct sge *) arg;
  1159. struct adapter *adapter = sge->adapter;
  1160. struct cmdQ *q = &sge->cmdQ[0];
  1161. struct sk_buff *skb;
  1162. unsigned int credits, queued_skb = 0;
  1163. spin_lock(&q->lock);
  1164. reclaim_completed_tx(sge, q);
  1165. credits = q->size - q->in_use;
  1166. pr_debug("restart_sched credits=%d\n", credits);
  1167. while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
  1168. unsigned int genbit, pidx, count;
  1169. count = 1 + skb_shinfo(skb)->nr_frags;
  1170. count += compute_large_page_tx_descs(skb);
  1171. q->in_use += count;
  1172. genbit = q->genbit;
  1173. pidx = q->pidx;
  1174. q->pidx += count;
  1175. if (q->pidx >= q->size) {
  1176. q->pidx -= q->size;
  1177. q->genbit ^= 1;
  1178. }
  1179. write_tx_descs(adapter, skb, pidx, genbit, q);
  1180. credits = q->size - q->in_use;
  1181. queued_skb = 1;
  1182. }
  1183. if (queued_skb) {
  1184. clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  1185. if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
  1186. set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
  1187. writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
  1188. }
  1189. }
  1190. spin_unlock(&q->lock);
  1191. }
  1192. /**
  1193. * sge_rx - process an ingress ethernet packet
  1194. * @sge: the sge structure
  1195. * @fl: the free list that contains the packet buffer
  1196. * @len: the packet length
  1197. *
  1198. * Process an ingress Ethernet packet and deliver it to the stack.
  1199. */
  1200. static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
  1201. {
  1202. struct sk_buff *skb;
  1203. const struct cpl_rx_pkt *p;
  1204. struct adapter *adapter = sge->adapter;
  1205. struct sge_port_stats *st;
  1206. struct net_device *dev;
  1207. skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
  1208. if (unlikely(!skb)) {
  1209. sge->stats.rx_drops++;
  1210. return;
  1211. }
  1212. p = (const struct cpl_rx_pkt *) skb->data;
  1213. if (p->iff >= adapter->params.nports) {
  1214. kfree_skb(skb);
  1215. return;
  1216. }
  1217. __skb_pull(skb, sizeof(*p));
  1218. st = this_cpu_ptr(sge->port_stats[p->iff]);
  1219. dev = adapter->port[p->iff].dev;
  1220. skb->protocol = eth_type_trans(skb, dev);
  1221. if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
  1222. skb->protocol == htons(ETH_P_IP) &&
  1223. (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
  1224. ++st->rx_cso_good;
  1225. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1226. } else
  1227. skb_checksum_none_assert(skb);
  1228. if (p->vlan_valid) {
  1229. st->vlan_xtract++;
  1230. __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
  1231. }
  1232. netif_receive_skb(skb);
  1233. }
  1234. /*
  1235. * Returns true if a command queue has enough available descriptors that
  1236. * we can resume Tx operation after temporarily disabling its packet queue.
  1237. */
  1238. static inline int enough_free_Tx_descs(const struct cmdQ *q)
  1239. {
  1240. unsigned int r = q->processed - q->cleaned;
  1241. return q->in_use - r < (q->size >> 1);
  1242. }
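/*
 * That is, a suspended queue is only considered restartable once fewer than
 * half of its descriptors are still in flight (posted but not yet processed
 * by the HW), which leaves some headroom before the queue has to be stopped
 * again.
 */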
  1243. /*
  1244. * Called when sufficient space has become available in the SGE command queues
  1245. * after the Tx packet schedulers have been suspended to restart the Tx path.
  1246. */
  1247. static void restart_tx_queues(struct sge *sge)
  1248. {
  1249. struct adapter *adap = sge->adapter;
  1250. int i;
  1251. if (!enough_free_Tx_descs(&sge->cmdQ[0]))
  1252. return;
  1253. for_each_port(adap, i) {
  1254. struct net_device *nd = adap->port[i].dev;
  1255. if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
  1256. netif_running(nd)) {
  1257. sge->stats.cmdQ_restarted[2]++;
  1258. netif_wake_queue(nd);
  1259. }
  1260. }
  1261. }
  1262. /*
  1263. * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
  1264. * information.
  1265. */
  1266. static unsigned int update_tx_info(struct adapter *adapter,
  1267. unsigned int flags,
  1268. unsigned int pr0)
  1269. {
  1270. struct sge *sge = adapter->sge;
  1271. struct cmdQ *cmdq = &sge->cmdQ[0];
  1272. cmdq->processed += pr0;
  1273. if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
  1274. freelQs_empty(sge);
  1275. flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
  1276. }
  1277. if (flags & F_CMDQ0_ENABLE) {
  1278. clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
  1279. if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
  1280. !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
  1281. set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
  1282. writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
  1283. }
  1284. if (sge->tx_sched)
  1285. tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
  1286. flags &= ~F_CMDQ0_ENABLE;
  1287. }
  1288. if (unlikely(sge->stopped_tx_queues != 0))
  1289. restart_tx_queues(sge);
  1290. return flags;
  1291. }
  1292. /*
  1293. * Process SGE responses, up to the supplied budget. Returns the number of
  1294. * responses processed.
  1295. */
  1296. static int process_responses(struct adapter *adapter, int budget)
  1297. {
  1298. struct sge *sge = adapter->sge;
  1299. struct respQ *q = &sge->respQ;
  1300. struct respQ_e *e = &q->entries[q->cidx];
  1301. int done = 0;
  1302. unsigned int flags = 0;
  1303. unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
  1304. while (done < budget && e->GenerationBit == q->genbit) {
  1305. flags |= e->Qsleeping;
  1306. cmdq_processed[0] += e->Cmdq0CreditReturn;
  1307. cmdq_processed[1] += e->Cmdq1CreditReturn;
  1308. /* We batch updates to the TX side to avoid cacheline
  1309. * ping-pong of TX state information on MP where the sender
  1310. * might run on a different CPU than this function...
  1311. */
  1312. if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
  1313. flags = update_tx_info(adapter, flags, cmdq_processed[0]);
  1314. cmdq_processed[0] = 0;
  1315. }
  1316. if (unlikely(cmdq_processed[1] > 16)) {
  1317. sge->cmdQ[1].processed += cmdq_processed[1];
  1318. cmdq_processed[1] = 0;
  1319. }
  1320. if (likely(e->DataValid)) {
  1321. struct freelQ *fl = &sge->freelQ[e->FreelistQid];
  1322. BUG_ON(!e->Sop || !e->Eop);
  1323. if (unlikely(e->Offload))
  1324. unexpected_offload(adapter, fl);
  1325. else
  1326. sge_rx(sge, fl, e->BufferLength);
  1327. ++done;
  1328. /*
  1329. * Note: this depends on each packet consuming a
  1330. * single free-list buffer; cf. the BUG above.
  1331. */
  1332. if (++fl->cidx == fl->size)
  1333. fl->cidx = 0;
  1334. prefetch(fl->centries[fl->cidx].skb);
  1335. if (unlikely(--fl->credits <
  1336. fl->size - SGE_FREEL_REFILL_THRESH))
  1337. refill_free_list(sge, fl);
  1338. } else
  1339. sge->stats.pure_rsps++;
  1340. e++;
  1341. if (unlikely(++q->cidx == q->size)) {
  1342. q->cidx = 0;
  1343. q->genbit ^= 1;
  1344. e = q->entries;
  1345. }
  1346. prefetch(e);
  1347. if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
  1348. writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
  1349. q->credits = 0;
  1350. }
  1351. }
  1352. flags = update_tx_info(adapter, flags, cmdq_processed[0]);
  1353. sge->cmdQ[1].processed += cmdq_processed[1];
  1354. return done;
  1355. }
  1356. static inline int responses_pending(const struct adapter *adapter)
  1357. {
  1358. const struct respQ *Q = &adapter->sge->respQ;
  1359. const struct respQ_e *e = &Q->entries[Q->cidx];
  1360. return e->GenerationBit == Q->genbit;
  1361. }
  1362. /*
  1363. * A simpler version of process_responses() that handles only pure (i.e.,
  1364. * non-data-carrying) responses. Such responses are too lightweight to justify
  1365. * calling a softirq when using NAPI, so we handle them specially in hard
  1366. * interrupt context. The caller must ensure that at least one response is
  1367. * pending when this function is called. Returns 1 if it
  1368. * encounters a valid data-carrying response, 0 otherwise.
  1369. */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI.  This does not need any
 * locking or protection from interrupts as data interrupts are off at
 * this point and other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

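/*
 * Writes of respQ.cidx to A_SG_SLEEPING (in t1_poll() above and in
 * t1_interrupt() below) report the driver's consumer index back to the SGE.
 * The assumption here is that the hardware uses this value to decide when a
 * new data interrupt is warranted, i.e. once responses have been posted past
 * the reported index.
 */
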
irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

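/*
 * Interrupt handling is thus split in two: when data responses are pending
 * the handler acknowledges the SGE data cause bit and either hands off to
 * NAPI (if process_pure_responses() hits a data-carrying response) or
 * consumes the pure responses in place and undoes the NAPI scheduling.
 * Everything else goes through the slow-path handler under async_lock.
 */
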
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed.  Afterwards it no longer accesses the global structure,
 * but uses the corresponding fields on the stack.  In conjunction with a
 * spinlock around that code, this makes the function reentrant without
 * holding the lock when we actually enqueue (which might be expensive,
 * especially on architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep.  There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
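
	/*
	 * This bit protocol pairs with update_tx_info(): when the hardware
	 * reports Qsleeping, update_tx_info() clears CMDQ_STAT_RUNNING and,
	 * if descriptors are still outstanding, sets CMDQ_STAT_LAST_PKT_DB
	 * and re-rings the cmdQ0 doorbell, closing the race described above.
	 */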

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

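/*
 * MK_ETH_TYPE_MSS() packs the MSS into bits 13:0 and the Ethernet header
 * type into bits 15:14.  For example, MK_ETH_TYPE_MSS(CPL_ETH_II, 1460)
 * evaluates to (1460 & 0x3FFF) | (CPL_ETH_II << 14); the 1460-byte MSS is
 * purely illustrative.
 */
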
/*
 * eth_hdr_len - return the length of an Ethernet header
 * @data: pointer to the start of the Ethernet header
 *
 * Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

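/*
 * In other words, eth_hdr_len() returns VLAN_ETH_HLEN (18 bytes) when the
 * outer ethertype is 802.1Q and ETH_HLEN (14 bytes) otherwise; only a
 * single VLAN tag is accounted for.
 */
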
/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/*
		 * We assume this catches the gratuitous ARP; the skb is kept
		 * around and reused later to flush out stuck ESPI packets
		 * (see the espibug workaround timers below).
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

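/*
 * The interrupt timer is programmed in core clock ticks, i.e.
 * fixed_intrtimer = rx_coalesce_usecs * core_ticks_per_usec(adapter).
 * As a purely illustrative example, a hypothetical 125 MHz core clock gives
 * core_ticks_per_usec() == 125, so 50 us of coalescing is written as 6250
 * ticks.
 */
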
/*
 * Allocates both RX and TX resources and configures the SGE.  However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround: resends a
 * previously captured ARP skb to flush out packets stuck in the ESPI block.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;

		sge->espibug_timer.data = (unsigned long)sge->adapter;
		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}