nes_mgt.c
/*
 * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include "nes.h"
#include "nes_mgt.h"
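
/* Module-wide counters tracking PAU (packed and unaligned) QP lifecycle */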
atomic_t pau_qps_created;
atomic_t pau_qps_destroyed;
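
/**
 * nes_replenish_mgt_rq - allocate and post new receive buffers
 *
 * Refills the management QP's receive queue with freshly allocated,
 * DMA-mapped skbs. If an skb allocation fails, a timer is armed so the
 * replenish is retried later.
 */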
static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
{
	unsigned long flags;
	dma_addr_t bus_address;
	struct sk_buff *skb;
	struct nes_hw_nic_rq_wqe *nic_rqe;
	struct nes_hw_mgt *nesmgt;
	struct nes_device *nesdev;
	struct nes_rskb_cb *cb;
	u32 rx_wqes_posted = 0;

	nesmgt = &mgtvnic->mgt;
	nesdev = mgtvnic->nesvnic->nesdev;
	spin_lock_irqsave(&nesmgt->rq_lock, flags);
	if (nesmgt->replenishing_rq != 0) {
		if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
		    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
			atomic_set(&mgtvnic->rx_skb_timer_running, 1);
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2);	/* 1/2 second */
			add_timer(&mgtvnic->rq_wqes_timer);
		} else {
			spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
		}
		return;
	}
	nesmgt->replenishing_rq = 1;
	spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
	do {
		skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size);
		if (skb) {
			skb->dev = mgtvnic->nesvnic->netdev;

			bus_address = pci_map_single(nesdev->pcidev,
						     skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->busaddr = bus_address;
			cb->maplen = mgtvnic->nesvnic->max_frame_size;

			nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head];
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
				cpu_to_le32(mgtvnic->nesvnic->max_frame_size);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
				cpu_to_le32((u32)bus_address);
			nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
				cpu_to_le32((u32)((u64)bus_address >> 32));
			nesmgt->rx_skb[nesmgt->rq_head] = skb;
			nesmgt->rq_head++;
			nesmgt->rq_head &= nesmgt->rq_size - 1;
			atomic_dec(&mgtvnic->rx_skbs_needed);
			barrier();
			if (++rx_wqes_posted == 255) {
				nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
				rx_wqes_posted = 0;
			}
		} else {
			spin_lock_irqsave(&nesmgt->rq_lock, flags);
			if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) &&
			    (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) {
				atomic_set(&mgtvnic->rx_skb_timer_running, 1);
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
				mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2);	/* 1/2 second */
				add_timer(&mgtvnic->rq_wqes_timer);
			} else {
				spin_unlock_irqrestore(&nesmgt->rq_lock, flags);
			}
			break;
		}
	} while (atomic_read(&mgtvnic->rx_skbs_needed));
	barrier();
	if (rx_wqes_posted)
		nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id);
	nesmgt->replenishing_rq = 0;
}

/**
 * nes_mgt_rq_wqes_timeout - retry the RQ replenish after an skb allocation failure
 */
static void nes_mgt_rq_wqes_timeout(unsigned long parm)
{
	struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;

	atomic_set(&mgtvnic->rx_skb_timer_running, 0);
	if (atomic_read(&mgtvnic->rx_skbs_needed))
		nes_replenish_mgt_rq(mgtvnic);
}

/**
 * nes_mgt_free_skb - unmap and free skb
 */
static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir)
{
	struct nes_rskb_cb *cb;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir);
	cb->busaddr = 0;
	dev_kfree_skb_any(skb);
}

/**
 * nes_download_callback - handle download completions
 */
static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
	struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer;
	struct nes_qp *nesqp = fpdu_info->nesqp;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < fpdu_info->frag_cnt; i++) {
		skb = fpdu_info->frags[i].skb;
		if (fpdu_info->frags[i].cmplt) {
			nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
			nes_rem_ref_cm_node(nesqp->cm_node);
		}
	}

	if (fpdu_info->hdr_vbase)
		pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len,
				    fpdu_info->hdr_vbase, fpdu_info->hdr_pbase);
	kfree(fpdu_info);
}

/**
 * nes_get_seq - Get the seq, ack_seq and window from the packet
 */
static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
	struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0];
	struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
	struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));

	*ack = be32_to_cpu(tcph->ack_seq);
	*wnd = be16_to_cpu(tcph->window);
	*fin_rcvd = tcph->fin;
	*rst_rcvd = tcph->rst;
	return be32_to_cpu(tcph->seq);
}

/**
 * nes_get_next_skb - Get the next skb based on where current skb is in the queue
 */
static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp,
					struct sk_buff *skb, u32 nextseq, u32 *ack,
					u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd)
{
	u32 seq;
	bool processacks;
	struct sk_buff *old_skb;

	if (skb) {
		/* Continue processing fpdu */
		if (skb->next == (struct sk_buff *)&nesqp->pau_list)
			goto out;
		skb = skb->next;
		processacks = false;
	} else {
		/* Starting a new one */
		if (skb_queue_empty(&nesqp->pau_list))
			goto out;
		skb = skb_peek(&nesqp->pau_list);
		processacks = true;
	}

	while (1) {
		if (skb_queue_empty(&nesqp->pau_list))
			goto out;

		seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
		if (seq == nextseq) {
			if (skb->len || processacks)
				break;
		} else if (after(seq, nextseq)) {
			goto out;
		}

		old_skb = skb;
		skb = skb->next;
		skb_unlink(old_skb, &nesqp->pau_list);
		nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
		if (skb == (struct sk_buff *)&nesqp->pau_list)
			goto out;
	}
	return skb;

out:
	return NULL;
}

/**
 * get_fpdu_info - Find the next complete fpdu and return its fragments.
 */
static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
			 struct pau_fpdu_info **pau_fpdu_info)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct nes_rskb_cb *cb;
	struct pau_fpdu_info *fpdu_info = NULL;
	struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
	u32 fpdu_len = 0;
	u32 tmp_len;
	int frag_cnt = 0;
	u32 tot_len;
	u32 frag_tot;
	u32 ack;
	u32 fin_rcvd;
	u32 rst_rcvd;
	u16 wnd;
	int i;
	int rc = 0;

	*pau_fpdu_info = NULL;

	skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
	if (!skb)
		goto out;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	if (skb->len) {
		fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
		fpdu_len = (fpdu_len + 3) & 0xfffffffc;	/* round up to a 4-byte boundary */
		tmp_len = fpdu_len;

		/* See if we have all of the fpdu */
		frag_tot = 0;
		memset(&frags, 0, sizeof frags);
		for (i = 0; i < MAX_FPDU_FRAGS; i++) {
			frags[i].physaddr = cb->busaddr;
			frags[i].physaddr += skb->data - cb->data_start;
			frags[i].frag_len = min(tmp_len, skb->len);
			frags[i].skb = skb;
			frags[i].cmplt = (skb->len == frags[i].frag_len);
			frag_tot += frags[i].frag_len;
			frag_cnt++;

			tmp_len -= frags[i].frag_len;
			if (tmp_len == 0)
				break;

			skb = nes_get_next_skb(nesdev, nesqp, skb,
					       nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
			if (!skb)
				goto out;
			if (rst_rcvd) {
				/* rst received in the middle of fpdu */
				for (; i >= 0; i--) {
					skb_unlink(frags[i].skb, &nesqp->pau_list);
					nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE);
				}
				cb = (struct nes_rskb_cb *)&skb->cb[0];
				frags[0].physaddr = cb->busaddr;
				frags[0].physaddr += skb->data - cb->data_start;
				frags[0].frag_len = skb->len;
				frags[0].skb = skb;
				frags[0].cmplt = true;
				frag_cnt = 1;
				break;
			}

			cb = (struct nes_rskb_cb *)&skb->cb[0];
		}
	} else {
		/* no data */
		frags[0].physaddr = cb->busaddr;
		frags[0].frag_len = 0;
		frags[0].skb = skb;
		frags[0].cmplt = true;
		frag_cnt = 1;
	}

	/* Found one */
	fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
	if (fpdu_info == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
		rc = -ENOMEM;
		goto out;
	}

	fpdu_info->cqp_request = nes_get_cqp_request(nesdev);
	if (fpdu_info->cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		rc = -ENOMEM;
		goto out;
	}

	cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0];
	iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start;
	fpdu_info->data_len = fpdu_len;
	tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN;

	if (frags[0].cmplt) {
		fpdu_info->hdr_pbase = cb->busaddr;
		fpdu_info->hdr_vbase = NULL;
	} else {
		fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev,
							    fpdu_info->hdr_len, &fpdu_info->hdr_pbase);
		if (!fpdu_info->hdr_vbase) {
			nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n");
			rc = -ENOMEM;
			goto out;
		}

		/* Copy hdrs, adjusting len and seqnum */
		memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len);
		iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN);
		tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	}

	iph->tot_len = cpu_to_be16(tot_len);
	iph->saddr = cpu_to_be32(0x7f000001);	/* loopback source address */

	tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
	tcph->ack_seq = cpu_to_be32(ack);
	tcph->window = cpu_to_be16(wnd);

	nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd;

	memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags));
	fpdu_info->frag_cnt = frag_cnt;
	fpdu_info->nesqp = nesqp;
	*pau_fpdu_info = fpdu_info;

	/* Update skb's for next pass */
	for (i = 0; i < frag_cnt; i++) {
		cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0];
		skb_pull(frags[i].skb, frags[i].frag_len);

		if (frags[i].skb->len == 0) {
			/* Pull skb off the list - it will be freed in the callback */
			if (!skb_queue_empty(&nesqp->pau_list))
				skb_unlink(frags[i].skb, &nesqp->pau_list);
		} else {
			/* Last skb still has data so update the seq */
			iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
			tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
			tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt);
		}
	}

out:
	if (rc) {
		if (fpdu_info) {
			if (fpdu_info->cqp_request)
				nes_put_cqp_request(nesdev, fpdu_info->cqp_request);
			kfree(fpdu_info);
		}
	}
	return rc;
}

/**
 * forward_fpdus - send complete fpdus, one at a time
 */
static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct pau_fpdu_info *fpdu_info;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_cqp_request *cqp_request;
	unsigned long flags;
	u64 u64tmp;
	u32 u32tmp;
	int rc;

	while (1) {
		spin_lock_irqsave(&nesqp->pau_lock, flags);
		rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
		if (rc || (fpdu_info == NULL)) {
			spin_unlock_irqrestore(&nesqp->pau_lock, flags);
			return rc;
		}

		cqp_request = fpdu_info->cqp_request;
		cqp_wqe = &cqp_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX,
				    NES_CQP_DOWNLOAD_SEGMENT |
				    (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT));

		u32tmp = fpdu_info->hdr_len << 16;
		u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX,
				    u32tmp);

		u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX,
				    u32tmp);

		u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX,
				    u32tmp);

		u64tmp = (u64)fpdu_info->hdr_pbase;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
				    lower_32_bits(u64tmp));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
				    upper_32_bits(u64tmp));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[0].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[0].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[1].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[1].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[2].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[2].physaddr));

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX,
				    lower_32_bits(fpdu_info->frags[3].physaddr));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX,
				    upper_32_bits(fpdu_info->frags[3].physaddr));

		cqp_request->cqp_callback_pointer = fpdu_info;
		cqp_request->callback = 1;
		cqp_request->cqp_callback = nes_download_callback;

		atomic_set(&cqp_request->refcount, 1);
		nes_post_cqp_request(nesdev, cqp_request);
		spin_unlock_irqrestore(&nesqp->pau_lock, flags);
	}

	return 0;
}
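
/**
 * process_fpdus - forward queued fpdus, rerunning while new work arrives
 *
 * Repeats the forwarding pass as long as queue_fpdus() marks more work
 * pending (pau_pending) while the current pass is in flight.
 */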
static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	int again = 1;
	unsigned long flags;

	do {
		/* Ignore rc - if it failed, tcp retries will cause it to try again */
		forward_fpdus(nesvnic, nesqp);
		spin_lock_irqsave(&nesqp->pau_lock, flags);
		if (nesqp->pau_pending) {
			nesqp->pau_pending = 0;
		} else {
			nesqp->pau_busy = 0;
			again = 0;
		}
		spin_unlock_irqrestore(&nesqp->pau_lock, flags);
	} while (again);
}

/**
 * queue_fpdus - Handle fpdus that hw passed up to sw
 */
static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct sk_buff *tmpskb;
	struct nes_rskb_cb *cb;
	struct iphdr *iph;
	struct tcphdr *tcph;
	unsigned char *tcph_end;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 seqnum;
	u32 len;
	bool process_it = false;
	unsigned long flags;

	/* Move data ptr to after tcp header */
	iph = (struct iphdr *)skb->data;
	tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl));
	seqnum = be32_to_cpu(tcph->seq);
	tcph_end = (((char *)tcph) + (4 * tcph->doff));

	len = be16_to_cpu(iph->tot_len);
	if (skb->len > len)
		skb_trim(skb, len);
	skb_pull(skb, tcph_end - skb->data);

	/* Initialize tracking values */
	cb = (struct nes_rskb_cb *)&skb->cb[0];
	cb->seqnum = seqnum;

	/* Make sure data is in the receive window */
	rcv_nxt = nesqp->pau_rcv_nxt;
	rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd);
	if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) {
		nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
		return;
	}

	spin_lock_irqsave(&nesqp->pau_lock, flags);

	if (nesqp->pau_busy)
		nesqp->pau_pending = 1;
	else
		nesqp->pau_busy = 1;

	/* Queue skb by sequence number */
	if (skb_queue_len(&nesqp->pau_list) == 0) {
		skb_queue_head(&nesqp->pau_list, skb);
	} else {
		tmpskb = nesqp->pau_list.next;
		while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
			cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
			if (before(seqnum, cb->seqnum))
				break;
			tmpskb = tmpskb->next;
		}
		skb_insert(tmpskb, skb, &nesqp->pau_list);
	}
	if (nesqp->pau_state == PAU_READY)
		process_it = true;
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);

	if (process_it)
		process_fpdus(nesvnic, nesqp);
}

/**
 * mgt_thread - Handle mgt skbs in a safe context
 */
static int mgt_thread(void *context)
{
	struct nes_vnic *nesvnic = context;
	struct sk_buff *skb;
	struct nes_rskb_cb *cb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(nesvnic->mgt_wait_queue,
					 skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop());
		while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) {
			skb = skb_dequeue(&nesvnic->mgt_skb_list);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->data_start = skb->data - ETH_HLEN;
			cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start,
						     nesvnic->max_frame_size, PCI_DMA_TODEVICE);
			queue_fpdus(skb, nesvnic, cb->nesqp);
		}
	}

	/* Closing down so delete any entries on the queue */
	while (skb_queue_len(&nesvnic->mgt_skb_list)) {
		skb = skb_dequeue(&nesvnic->mgt_skb_list);
		cb = (struct nes_rskb_cb *)&skb->cb[0];
		nes_rem_ref_cm_node(cb->nesqp->cm_node);
		dev_kfree_skb_any(skb);
	}
	return 0;
}

/**
 * nes_queue_mgt_skbs - Queue skb so it can be handled in a thread context
 */
void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_rskb_cb *cb;

	cb = (struct nes_rskb_cb *)&skb->cb[0];
	cb->nesqp = nesqp;
	skb_queue_tail(&nesvnic->mgt_skb_list, skb);
	wake_up_interruptible(&nesvnic->mgt_wait_queue);
}
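
/**
 * nes_destroy_pau_qp - drop any skbs still queued for a PAU QP being torn down
 */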
void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
{
	struct sk_buff *skb;
	unsigned long flags;

	atomic_inc(&pau_qps_destroyed);

	/* Free packets that have not yet been forwarded */
	/* Lock is acquired by skb_dequeue when removing the skb */
	spin_lock_irqsave(&nesqp->pau_lock, flags);
	while (skb_queue_len(&nesqp->pau_list)) {
		skb = skb_dequeue(&nesqp->pau_list);
		nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE);
		nes_rem_ref_cm_node(nesqp->cm_node);
	}
	spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}
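
/**
 * nes_chg_qh_handler - CQP completion handler for the quad-hash state machine
 *
 * After the old quad hash is deleted (PAU_DEL_QH), a new loopback hash is
 * added; once that completes (PAU_ADD_LB_QH), queued fpdus are processed.
 */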
static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
	struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer;
	struct nes_cqp_request *new_request;
	struct nes_hw_cqp_wqe *cqp_wqe;
	struct nes_adapter *nesadapter;
	struct nes_qp *nesqp;
	struct nes_v4_quad nes_quad;
	u32 crc_value;
	u64 u64temp;

	nesadapter = nesdev->nesadapter;
	nesqp = qh_chg->nesqp;

	/* Should we handle the bad completion */
	if (cqp_request->major_code)
		WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
		     cqp_request->major_code);

	switch (nesqp->pau_state) {
	case PAU_DEL_QH:
		/* Old hash code deleted, now set the new one */
		nesqp->pau_state = PAU_ADD_LB_QH;
		new_request = nes_get_cqp_request(nesdev);
		if (new_request == NULL) {
			nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n");
			WARN_ON(1);
			return;
		}

		memset(&nes_quad, 0, sizeof(nes_quad));
		nes_quad.DstIpAdrIndex =
			cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
		nes_quad.SrcIpadr = cpu_to_be32(0x7f000001);
		nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]);
		nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]);

		/* Produce hash key */
		crc_value = get_crc_value(&nes_quad);
		nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
		nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n",
			  nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);

		nesqp->hte_index &= nesadapter->hte_index_mask;
		nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
		nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001);
		nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt);

		cqp_wqe = &new_request->cqp_wqe;
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words,
				    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH |
				    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
		u64temp = (u64)nesqp->nesqp_context_pbase;
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

		nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n");

		new_request->cqp_callback_pointer = qh_chg;
		new_request->callback = 1;
		new_request->cqp_callback = nes_chg_qh_handler;
		atomic_set(&new_request->refcount, 1);
		nes_post_cqp_request(nesdev, new_request);
		break;

	case PAU_ADD_LB_QH:
		/* Start processing the queued fpdus */
		nesqp->pau_state = PAU_READY;
		process_fpdus(qh_chg->nesvnic, qh_chg->nesqp);
		kfree(qh_chg);
		break;
	}
}

/**
 * nes_change_quad_hash - replace a QP's quad hash with the loopback version
 */
static int nes_change_quad_hash(struct nes_device *nesdev,
				struct nes_vnic *nesvnic, struct nes_qp *nesqp)
{
	struct nes_cqp_request *cqp_request = NULL;
	struct pau_qh_chg *qh_chg = NULL;
	u64 u64temp;
	struct nes_hw_cqp_wqe *cqp_wqe;
	int ret = 0;

	cqp_request = nes_get_cqp_request(nesdev);
	if (cqp_request == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
		ret = -ENOMEM;
		goto chg_qh_err;
	}

	qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
	if (qh_chg == NULL) {
		nes_debug(NES_DBG_PAU, "Failed to allocate a qh_chg.\n");
		ret = -ENOMEM;
		goto chg_qh_err;
	}
	qh_chg->nesdev = nesdev;
	qh_chg->nesvnic = nesvnic;
	qh_chg->nesqp = nesqp;
	nesqp->pau_state = PAU_DEL_QH;

	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
	set_wqe_32bit_value(cqp_wqe->wqe_words,
			    NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE |
			    NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS);
	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
	u64temp = (u64)nesqp->nesqp_context_pbase;
	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

	nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n");

	cqp_request->cqp_callback_pointer = qh_chg;
	cqp_request->callback = 1;
	cqp_request->cqp_callback = nes_chg_qh_handler;
	atomic_set(&cqp_request->refcount, 1);
	nes_post_cqp_request(nesdev, cqp_request);
	return ret;

chg_qh_err:
	kfree(qh_chg);
	if (cqp_request)
		nes_put_cqp_request(nesdev, cqp_request);
	return ret;
}

/**
 * nes_mgt_ce_handler
 * This management code deals with any packed and unaligned (pau) fpdus
 * that the hardware cannot handle.
 */
static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
{
	struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq);
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	u32 head;
	u32 cq_size;
	u32 cqe_count = 0;
	u32 cqe_misc;
	u32 qp_id = 0;
	u32 skbs_needed;
	unsigned long context;
	struct nes_qp *nesqp;
	struct sk_buff *rx_skb;
	struct nes_rskb_cb *cb;

	head = cq->cq_head;
	cq_size = cq->cq_size;

	while (1) {
		cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
		if (!(cqe_misc & NES_NIC_CQE_VALID))
			break;

		nesqp = NULL;
		if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) {
			qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]);
			qp_id &= 0x001fffff;
			if (qp_id < nesadapter->max_qp) {
				context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN];
				nesqp = (struct nes_qp *)context;
			}
		}

		if (nesqp) {
			if (nesqp->pau_mode == false) {
				nesqp->pau_mode = true;	/* First time for this qp */
				nesqp->pau_rcv_nxt = le32_to_cpu(
					cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
				skb_queue_head_init(&nesqp->pau_list);
				spin_lock_init(&nesqp->pau_lock);
				atomic_inc(&pau_qps_created);
				nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
			}

			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			rx_skb->len = 0;
			skb_put(rx_skb, cqe_misc & 0x0000ffff);
			rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev);
			cb = (struct nes_rskb_cb *)&rx_skb->cb[0];
			pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE);
			cb->busaddr = 0;
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1;

			nes_add_ref_cm_node(nesqp->cm_node);
			nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp);
		} else {
			printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id);
		}

		cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
		cqe_count++;
		if (++head >= cq_size)
			head = 0;

		if (cqe_count == 255) {
			/* Replenish mgt CQ */
			nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16));
			nesdev->currcq_count += cqe_count;
			cqe_count = 0;
		}

		skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed);
		if (skbs_needed > (mgtvnic->mgt.rq_size >> 1))
			nes_replenish_mgt_rq(mgtvnic);
	}

	cq->cq_head = head;
	nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
		    cq->cq_number | (cqe_count << 16));
	nes_read32(nesdev->regs + NES_CQE_ALLOC);
	nesdev->currcq_count += cqe_count;
}

/**
 * nes_init_mgt_qp - allocate and bring up the management QPs and CQs
 */
int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic)
{
	struct nes_vnic_mgt *mgtvnic;
	u32 counter;
	void *vmem;
	dma_addr_t pmem;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 cqp_head;
	unsigned long flags;
	struct nes_hw_nic_qp_context *mgt_context;
	u64 u64temp;
	struct nes_hw_nic_rq_wqe *mgt_rqe;
	struct sk_buff *skb;
	u32 wqe_count;
	struct nes_rskb_cb *cb;
	u32 mgt_mem_size;
	void *mgt_vbase;
	dma_addr_t mgt_pbase;
	int i;
	int ret;

	/* Allocate space for all mgt QPs at once */
	mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
	if (mgtvnic == NULL) {
		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
		return -ENOMEM;
	}

	/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
	/* We are not sending from this NIC so sq is not allocated */
	mgt_mem_size = 256 +
		       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) +
		       (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) +
		       sizeof(struct nes_hw_nic_qp_context);
	mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase);
	if (!mgt_vbase) {
		kfree(mgtvnic);
		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n");
		return -ENOMEM;
	}

	nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size;
	nesvnic->mgt_vbase = mgt_vbase;
	nesvnic->mgt_pbase = mgt_pbase;

	skb_queue_head_init(&nesvnic->mgt_skb_list);
	init_waitqueue_head(&nesvnic->mgt_wait_queue);
	nesvnic->mgt_thread = kthread_run(mgt_thread, nesvnic, "nes_mgt_thread");

	for (i = 0; i < NES_MGT_QP_COUNT; i++) {
		mgtvnic->nesvnic = nesvnic;
		mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i;
		memset(mgt_vbase, 0, mgt_mem_size);
		nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n",
			  mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size);

		vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) &
				~(unsigned long)(256 - 1));
		pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) &
				    ~(unsigned long long)(256 - 1));

		spin_lock_init(&mgtvnic->mgt.rq_lock);

		/* setup the RQ */
		mgtvnic->mgt.rq_vbase = vmem;
		mgtvnic->mgt.rq_pbase = pmem;
		mgtvnic->mgt.rq_head = 0;
		mgtvnic->mgt.rq_tail = 0;
		mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT;

		/* setup the CQ */
		vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));
		pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe));

		mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id;
		mgtvnic->mgt_cq.cq_vbase = vmem;
		mgtvnic->mgt_cq.cq_pbase = pmem;
		mgtvnic->mgt_cq.cq_head = 0;
		mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT;

		mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler;

		/* Send CreateCQ request to CQP */
		spin_lock_irqsave(&nesdev->cqp.lock, flags);
		cqp_head = nesdev->cqp.sq_head;

		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
			NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
			((u32)mgtvnic->mgt_cq.cq_size << 16));
		cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
			mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16));
		u64temp = (u64)mgtvnic->mgt_cq.cq_pbase;
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
		u64temp = (unsigned long)&mgtvnic->mgt_cq;
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
			cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
		cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;
		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		/* Send CreateQP request to CQP */
		mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]);
		mgt_context->context_words[NES_NIC_CTX_MISC_IDX] =
			cpu_to_le32((u32)NES_MGT_CTX_SIZE |
				    ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
		nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
			  nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
			  nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
		if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0)
			mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);

		u64temp = (u64)mgtvnic->mgt.rq_pbase;
		mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
		mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
		u64temp = (u64)mgtvnic->mgt.rq_pbase;
		mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
		mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));

		cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
									 NES_CQP_QP_TYPE_NIC);
		cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id);
		u64temp = (u64)mgtvnic->mgt_cq.cq_pbase +
			  (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;
		nesdev->cqp.sq_head = cqp_head;

		barrier();

		/* Ring doorbell (2 WQEs) */
		nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n",
			  mgtvnic->mgt.qp_id);

		ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
					 NES_EVENT_TIMEOUT);
		nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n",
			  mgtvnic->mgt.qp_id, ret);
		if (!ret) {
			nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id);
			if (i == 0) {
				pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
						    nesvnic->mgt_pbase);
				kfree(mgtvnic);
			} else {
				nes_destroy_mgt(nesvnic);
			}
			return -EIO;
		}

		/* Populate the RQ */
		for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) {
			skb = dev_alloc_skb(nesvnic->max_frame_size);
			if (!skb) {
				nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
				return -ENOMEM;
			}

			skb->dev = netdev;

			pmem = pci_map_single(nesdev->pcidev, skb->data,
					      nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
			cb = (struct nes_rskb_cb *)&skb->cb[0];
			cb->busaddr = pmem;
			cb->maplen = nesvnic->max_frame_size;

			mgt_rqe = &mgtvnic->mgt.rq_vbase[counter];
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size);
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
			mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
			mgtvnic->mgt.rx_skb[counter] = skb;
		}

		init_timer(&mgtvnic->rq_wqes_timer);
		mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout;
		mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic;

		wqe_count = NES_MGT_WQ_COUNT - 1;
		mgtvnic->mgt.rq_head = wqe_count;
		barrier();
		do {
			counter = min(wqe_count, ((u32)255));
			wqe_count -= counter;
			nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id);
		} while (wqe_count);

		nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
			    mgtvnic->mgt_cq.cq_number);
		nes_read32(nesdev->regs + NES_CQE_ALLOC);

		mgt_vbase += mgt_mem_size;
		mgt_pbase += mgt_mem_size;
		nesvnic->mgtvnic[i] = mgtvnic++;
	}
	return 0;
}
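
/**
 * nes_destroy_mgt - tear down the management QPs, CQs and receive buffers
 */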
void nes_destroy_mgt(struct nes_vnic *nesvnic)
{
	struct nes_device *nesdev = nesvnic->nesdev;
	struct nes_vnic_mgt *mgtvnic;
	struct nes_vnic_mgt *first_mgtvnic;
	unsigned long flags;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 cqp_head;
	struct sk_buff *rx_skb;
	int i;
	int ret;

	kthread_stop(nesvnic->mgt_thread);

	/* Free remaining NIC receive buffers */
	first_mgtvnic = nesvnic->mgtvnic[0];
	for (i = 0; i < NES_MGT_QP_COUNT; i++) {
		mgtvnic = nesvnic->mgtvnic[i];
		if (mgtvnic == NULL)
			continue;

		while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) {
			rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail];
			nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE);
			mgtvnic->mgt.rq_tail++;
			mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1);
		}

		spin_lock_irqsave(&nesdev->cqp.lock, flags);

		/* Destroy NIC QP */
		cqp_head = nesdev->cqp.sq_head;
		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    mgtvnic->mgt.qp_id);

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];

		/* Destroy NIC CQ */
		nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
				    (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16)));
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
				    (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)));

		if (++cqp_head >= nesdev->cqp.sq_size)
			cqp_head = 0;

		nesdev->cqp.sq_head = cqp_head;
		barrier();

		/* Ring doorbell (2 WQEs) */
		nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);

		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
		nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
			  " cqp.sq_tail=%u, cqp.sq_size=%u\n",
			  cqp_head, nesdev->cqp.sq_head,
			  nesdev->cqp.sq_tail, nesdev->cqp.sq_size);

		ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
					 NES_EVENT_TIMEOUT);

		nes_debug(NES_DBG_SHUTDOWN, "Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
			  " cqp.sq_head=%u, cqp.sq_tail=%u\n",
			  ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
		if (!ret)
			nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n",
				  mgtvnic->mgt.qp_id);

		nesvnic->mgtvnic[i] = NULL;
	}

	if (nesvnic->mgt_vbase) {
		pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase,
				    nesvnic->mgt_pbase);
		nesvnic->mgt_vbase = NULL;
		nesvnic->mgt_pbase = 0;
	}

	kfree(first_mgtvnic);
}