fnic_fcs.c

/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);

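/*
 * fnic_handle_link() - handle link up/down events.
 * Deferred work (fnic->link_work) that reads the link state and link-down
 * count from the firmware and reports transitions to libfcoe via
 * fcoe_ctlr_link_up()/fcoe_ctlr_link_down().
 */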
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		skb_pull(skb, sizeof(*eh));
		fcoe_ctlr_recv(&fnic->ctlr, skb);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (!compare_ether_addr(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (compare_ether_addr(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

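/*
 * fnic_rq_cmpl_frame_recv() - handle one received frame completion.
 * Unmaps the receive buffer, decodes the completion descriptor (FCP or
 * Ethernet type), hands FIP frames to libfcoe via fnic_import_rq_eth_pkt(),
 * and queues good FC frames for fnic_handle_frame().  Frames with FCS,
 * CRC, or encapsulation errors are dropped.
 */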
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

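/*
 * fnic_rq_cmpl_handler_cont() - per-descriptor RQ completion callback.
 * Invoked by vnic_cq_service() for each completed receive descriptor;
 * delegates the actual frame handling to fnic_rq_cmpl_frame_recv().
 */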
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

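/*
 * fnic_rq_cmpl_handler() - service the receive completion queues.
 * Processes up to rq_work_to_do completions on each RQ completion queue
 * and replenishes the receive queues with fresh buffers.  Returns the
 * total amount of work done.
 */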
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

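/*
 * fnic_free_rq_buf() - free a receive queue buffer.
 * Unmaps the DMA mapping and frees the sk_buff backing an unused
 * receive descriptor.
 */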
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       fnic->vlan_hw_insert, fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	/* Drain the transmit queue filled by fnic_send() while in transition */
	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

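/*
 * fnic_wq_complete_frame_send() - WQ send completion for one frame.
 * Unmaps the transmit DMA mapping and frees the sk_buff once the
 * hardware has finished sending the frame.
 */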
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

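/*
 * fnic_wq_cmpl_handler_cont() - per-descriptor WQ completion callback.
 * Invoked by vnic_cq_service() for each completed send descriptor;
 * services the work queue under the corresponding wq_lock.
 */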
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

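/*
 * fnic_wq_cmpl_handler() - service the raw work queue completion queues.
 * Processes up to work_to_do send completions per raw WQ and returns
 * the total amount of work done.
 */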
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

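/*
 * fnic_free_wq_buf() - free a work queue buffer.
 * Unmaps the transmit DMA mapping and frees the sk_buff for a descriptor
 * that will not be sent.
 */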
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}