/* fnic_fcs.c - FCoE frame send/receive and FLOGI handling for the Cisco fnic driver */
  1. /*
  2. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. */
  18. #include <linux/errno.h>
  19. #include <linux/pci.h>
  20. #include <linux/skbuff.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/if_ether.h>
  24. #include <linux/if_vlan.h>
  25. #include <linux/workqueue.h>
  26. #include <scsi/fc/fc_els.h>
  27. #include <scsi/fc/fc_fcoe.h>
  28. #include <scsi/fc_frame.h>
  29. #include <scsi/libfc.h>
  30. #include "fnic_io.h"
  31. #include "fnic.h"
  32. #include "cq_enet_desc.h"
  33. #include "cq_exch_desc.h"
/* Single-threaded driver workqueue on which link and frame work items run */
struct workqueue_struct *fnic_event_queue;
/*
 * fnic_handle_link() - work handler for link-state change events.
 *
 * Reads the current link status and link-down count from the vNIC device,
 * compares them against the cached values, and informs libFC of any
 * transition (fc_linkup/fc_linkdown).  The fnic_lock protects the cached
 * link state; it is always dropped before calling into libFC, since those
 * calls may sleep or re-enter the driver.
 *
 * Note: the UP -> DOWN -> UP case (status unchanged but link_down_cnt
 * bumped) is detected via the hardware down-counter so that a fast bounce
 * is not lost; it is reported to libFC as a down followed by an up.
 */
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* Device is being torn down; ignore further link events */
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP: link bounced while we
				 * weren't looking; count it as a failure and
				 * replay both transitions to libFC */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fc_linkdown(fnic->lport);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fc_linkup(fnic->lport);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fc_linkup(fnic->lport);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fc_linkdown(fnic->lport);
	}
}
/*
 * fnic_handle_frame() - work handler that passes incoming fabric frames
 * to libFC.
 *
 * Drains fnic->frame_queue (filled by the RQ completion path) and hands
 * each frame to fc_exch_recv().  fr_flags() on a queued frame is used as
 * a marker meaning "successful FLOGI response": for such frames the data
 * source MAC is registered with the vNIC before the flag is cleared.
 * The fnic_lock only covers the per-frame state check and address
 * registration; it is dropped before calling into libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			/* Tearing down: free this frame and stop draining.
			 * NOTE(review): remaining queued frames are presumably
			 * freed by the teardown path - confirm against caller */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;
		/* if Flogi resp frame, register the address */
		if (fr_flags(fp)) {
			vnic_dev_add_addr(fnic->vdev,
					  fnic->data_src_addr);
			fr_flags(fp) = 0;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}
  110. static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
  111. u32 len, u8 sof, u8 eof)
  112. {
  113. struct fc_frame *fp = (struct fc_frame *)skb;
  114. skb_trim(skb, len);
  115. fr_eof(fp) = eof;
  116. fr_sof(fp) = sof;
  117. }
/*
 * fnic_import_rq_eth_pkt() - decapsulate a raw Ethernet/FCoE packet into
 * an FC frame in place.
 *
 * @skb: received buffer; skb->data starts at the Ethernet header
 * @len: total received length including Ethernet/VLAN and FCoE wrappers
 *
 * Strips the (possibly VLAN-tagged) Ethernet header and the FCoE header,
 * records SOF from the FCoE header and EOF from the FCoE CRC/EOF trailer,
 * and trims the skb down to just the FC frame.  Returns 0 on success,
 * -1 if the ethertype is not FCoE or the FCoE version is unsupported.
 */
static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct vlan_ethhdr *vh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;
	u32 transport_len = 0;	/* bytes of encapsulation stripped so far */

	eh = (struct ethhdr *)skb->data;
	vh = (struct vlan_ethhdr *)skb->data;
	if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
	    vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
		skb_pull(skb, sizeof(struct vlan_ethhdr));
		transport_len += sizeof(struct vlan_ethhdr);
	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
		transport_len += sizeof(struct ethhdr);
		skb_pull(skb, sizeof(struct ethhdr));
	} else
		return -1;	/* not an FCoE packet */

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		return -1;	/* unsupported FCoE version */

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	transport_len += sizeof(struct fcoe_hdr);

	/* The CRC/EOF trailer sits at the end of the FC payload:
	 * payload length is (len - transport_len), counted from the
	 * current skb->data which is now past all encapsulation */
	ft = (struct fcoe_crc_eof *)(skb->data + len -
				     transport_len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;

	/* Drop the trailer, leaving only the FC frame */
	skb_trim(skb, len - transport_len - sizeof(*ft));
	return 0;
}
/*
 * fnic_handle_flogi_resp() - process an accepted FLOGI response.
 *
 * Called from the RQ completion path when a frame matching the cached
 * FLOGI OX_ID arrives.  Caches the frame in fnic->flogi_resp (to be
 * forwarded to libFC once the firmware FLOGI registration completes),
 * learns our S_ID and the addressing mode (FC-OUI vs gateway) from the
 * frame, transitions the fnic state machine from ETH mode to
 * ETH_TRANS_FC mode, and issues the flogi_reg request to firmware.
 *
 * Returns 0 on success, -1 if the frame is stale (OX_ID mismatch) or the
 * fnic is not in a state to accept a FLOGI response.  On failure paths
 * the frame is freed here; ownership of a cached frame belongs to
 * fnic->flogi_resp under fnic_lock.
 */
static inline int fnic_handle_flogi_resp(struct fnic *fnic,
					 struct fc_frame *fp)
{
	u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
	struct ethhdr *eth_hdr;
	struct fc_frame_header *fh;
	int ret = 0;
	unsigned long flags;
	struct fc_frame *old_flogi_resp = NULL;

	fh = (struct fc_frame_header *)fr_hdr(fp);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_MODE) {

		/*
		 * Check if oxid matches on taking the lock. A new Flogi
		 * issued by libFC might have changed the fnic cached oxid
		 */
		if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Flogi response oxid not"
				     " matching cached oxid, dropping frame"
				     "\n");
			ret = -1;
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb_irq(fp_skb(fp));
			goto handle_flogi_resp_end;
		}

		/* Drop older cached flogi response frame, cache this frame */
		old_flogi_resp = fnic->flogi_resp;
		fnic->flogi_resp = fp;
		fnic->flogi_oxid = FC_XID_UNKNOWN;

		/*
		 * this frame is part of flogi get the src mac addr from this
		 * frame if the src mac is fcoui based then we mark the
		 * address mode flag to use fcoui base for dst mac addr
		 * otherwise we have to store the fcoe gateway addr
		 */
		eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
		memcpy(mac, eth_hdr->h_source, ETH_ALEN);

		if (ntoh24(mac) == FC_FCOE_OUI)
			fnic->fcoui_mode = 1;
		else {
			fnic->fcoui_mode = 0;
			memcpy(fnic->dest_addr, mac, ETH_ALEN);
		}

		/*
		 * Except for Flogi frame, all outbound frames from us have the
		 * Eth Src address as FC_FCOE_OUI"our_sid". Flogi frame uses
		 * the vnic MAC address as the Eth Src address
		 */
		fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);

		/* We get our s_id from the d_id of the flogi resp frame */
		fnic->s_id = ntoh24(fh->fh_d_id);

		/* Change state to reflect transition from Eth to FC mode */
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;

	} else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		ret = -1;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		dev_kfree_skb_irq(fp_skb(fp));
		goto handle_flogi_resp_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Drop older cached frame */
	if (old_flogi_resp)
		dev_kfree_skb_irq(fp_skb(old_flogi_resp));

	/*
	 * send flogi reg request to firmware, this will put the fnic in
	 * in FC mode
	 */
	ret = fnic_flogi_reg_handler(fnic);

	if (ret < 0) {
		int free_fp = 1;
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/*
		 * free the frame is some other thread is not
		 * pointing to it
		 */
		if (fnic->flogi_resp != fp)
			free_fp = 0;
		else
			fnic->flogi_resp = NULL;

		/* Roll the state machine back since registration failed */
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (free_fp)
			dev_kfree_skb_irq(fp_skb(fp));
	}

handle_flogi_resp_end:
	return ret;
}
  244. /* Returns 1 for a response that matches cached flogi oxid */
  245. static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
  246. struct fc_frame *fp)
  247. {
  248. struct fc_frame_header *fh;
  249. int ret = 0;
  250. u32 f_ctl;
  251. fh = fc_frame_header_get(fp);
  252. f_ctl = ntoh24(fh->fh_f_ctl);
  253. if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
  254. fh->fh_r_ctl == FC_RCTL_ELS_REP &&
  255. (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
  256. fh->fh_type == FC_TYPE_ELS)
  257. ret = 1;
  258. return ret;
  259. }
/*
 * fnic_rq_cmpl_frame_recv() - per-buffer RQ completion callback.
 *
 * Unmaps the DMA buffer, decodes the completion descriptor (either an
 * FCP-type descriptor where hardware already stripped the Ethernet/FCoE
 * headers, or a plain ENET descriptor where we must decapsulate in
 * software), validates CRC/FCS/encap status, and then either:
 *   - consumes the frame as a matching FLOGI ACC (register with firmware),
 *   - forwards it to the frame_queue work for libFC, or
 *   - drops it.
 * Runs in interrupt context (uses dev_kfree_skb_irq on the drop path).
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	/* The buffer is done being received into; release the DMA mapping
	 * and take ownership of the skb from the RQ buffer slot */
	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* Hardware stripped Eth/FCoE headers; descriptor carries
		 * SOF/EOF and FC payload length directly */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* Raw Ethernet completion; decapsulation is done below in
		 * fnic_import_rq_eth_pkt() */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* Drop frames that failed any hardware integrity check */
	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	if (eth_hdrs_stripped)
		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
		goto drop;

	fp = (struct fc_frame *)skb;

	/*
	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
	 * and is accept, issue flogi_reg_request copy wq request to firmware
	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
	 */
	if (is_matching_flogi_resp_frame(fnic, fp)) {
		if (!eth_hdrs_stripped) {
			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
				/* fnic_handle_flogi_resp takes ownership */
				fnic_handle_flogi_resp(fnic, fp);
				return;
			}
			/*
			 * Recd. Flogi reject. No point registering
			 * with fw, but forward to libFC
			 */
			goto forward;
		}
		/* FLOGI response on an FCP descriptor is unexpected */
		goto drop;
	}
	if (!eth_hdrs_stripped)
		goto drop;

forward:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	/* Use fr_flags to indicate whether succ. flogi resp or not */
	fr_flags(fp) = 0;
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Hand off to fnic_handle_frame() in process context */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
  366. static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
  367. struct cq_desc *cq_desc, u8 type,
  368. u16 q_number, u16 completed_index,
  369. void *opaque)
  370. {
  371. struct fnic *fnic = vnic_dev_priv(vdev);
  372. vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
  373. VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
  374. NULL);
  375. return 0;
  376. }
  377. int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
  378. {
  379. unsigned int tot_rq_work_done = 0, cur_work_done;
  380. unsigned int i;
  381. int err;
  382. for (i = 0; i < fnic->rq_count; i++) {
  383. cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
  384. fnic_rq_cmpl_handler_cont,
  385. NULL);
  386. if (cur_work_done) {
  387. err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
  388. if (err)
  389. shost_printk(KERN_ERR, fnic->lport->host,
  390. "fnic_alloc_rq_frame cant alloc"
  391. " frame\n");
  392. }
  393. tot_rq_work_done += cur_work_done;
  394. }
  395. return tot_rq_work_done;
  396. }
  397. /*
  398. * This function is called once at init time to allocate and fill RQ
  399. * buffers. Subsequently, it is called in the interrupt context after RQ
  400. * buffer processing to replenish the buffers in the RQ
  401. */
  402. int fnic_alloc_rq_frame(struct vnic_rq *rq)
  403. {
  404. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  405. struct sk_buff *skb;
  406. u16 len;
  407. dma_addr_t pa;
  408. len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
  409. skb = dev_alloc_skb(len);
  410. if (!skb) {
  411. FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
  412. "Unable to allocate RQ sk_buff\n");
  413. return -ENOMEM;
  414. }
  415. skb_reset_mac_header(skb);
  416. skb_reset_transport_header(skb);
  417. skb_reset_network_header(skb);
  418. skb_put(skb, len);
  419. pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
  420. fnic_queue_rq_desc(rq, skb, pa, len);
  421. return 0;
  422. }
  423. void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
  424. {
  425. struct fc_frame *fp = buf->os_buf;
  426. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  427. pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
  428. PCI_DMA_FROMDEVICE);
  429. dev_kfree_skb(fp_skb(fp));
  430. buf->os_buf = NULL;
  431. }
  432. static inline int is_flogi_frame(struct fc_frame_header *fh)
  433. {
  434. return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
  435. }
  436. int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
  437. {
  438. struct vnic_wq *wq = &fnic->wq[0];
  439. struct sk_buff *skb;
  440. dma_addr_t pa;
  441. struct ethhdr *eth_hdr;
  442. struct vlan_ethhdr *vlan_hdr;
  443. struct fcoe_hdr *fcoe_hdr;
  444. struct fc_frame_header *fh;
  445. u32 tot_len, eth_hdr_len;
  446. int ret = 0;
  447. unsigned long flags;
  448. fh = fc_frame_header_get(fp);
  449. skb = fp_skb(fp);
  450. if (!fnic->vlan_hw_insert) {
  451. eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
  452. vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
  453. eth_hdr = (struct ethhdr *)vlan_hdr;
  454. vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
  455. vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
  456. vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
  457. fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
  458. } else {
  459. eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
  460. eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
  461. eth_hdr->h_proto = htons(ETH_P_FCOE);
  462. fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
  463. }
  464. if (is_flogi_frame(fh)) {
  465. fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
  466. memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
  467. } else {
  468. if (fnic->fcoui_mode)
  469. fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
  470. else
  471. memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
  472. memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
  473. }
  474. tot_len = skb->len;
  475. BUG_ON(tot_len % 4);
  476. memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
  477. fcoe_hdr->fcoe_sof = fr_sof(fp);
  478. if (FC_FCOE_VER)
  479. FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
  480. pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
  481. spin_lock_irqsave(&fnic->wq_lock[0], flags);
  482. if (!vnic_wq_desc_avail(wq)) {
  483. pci_unmap_single(fnic->pdev, pa,
  484. tot_len, PCI_DMA_TODEVICE);
  485. ret = -1;
  486. goto fnic_send_frame_end;
  487. }
  488. fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
  489. fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
  490. fnic_send_frame_end:
  491. spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
  492. if (ret)
  493. dev_kfree_skb_any(fp_skb(fp));
  494. return ret;
  495. }
/*
 * fnic_send
 * Routine to send a raw frame
 *
 * Entry point used by libFC to transmit a frame.  Non-FLOGI frames are
 * sent straight out via fnic_send_frame().  A FLOGI frame drives the
 * fnic state machine:
 *   - FC mode (or mid-transition to FC): reset the firmware back to Eth
 *     mode first, then cache the FLOGI to be sent once reset completes;
 *   - FC_TRANS_ETH mode: a reset is already pending, just cache the
 *     FLOGI and its OX_ID;
 *   - ETH mode: record the OX_ID and send the FLOGI immediately.
 * Any previously cached FLOGI / FLOGI-response frames are dropped and
 * freed outside the lock.  Returns 0 on success, nonzero on failure
 * (the frame is always consumed/freed on failure paths).
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	struct fc_frame_header *fh;
	int ret = 0;
	enum fnic_state old_state;
	unsigned long flags;
	struct fc_frame *old_flogi = NULL;
	struct fc_frame *old_flogi_resp = NULL;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		ret = -1;
		goto fnic_send_end;
	}

	fh = fc_frame_header_get(fp);
	/* if not an Flogi frame, send it out, this is the common case */
	if (!is_flogi_frame(fh))
		return fnic_send_frame(fnic, fp);

	/* Flogi frame, now enter the state machine */

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	/* Get any old cached frames, free them after dropping lock */
	old_flogi = fnic->flogi;
	fnic->flogi = NULL;
	old_flogi_resp = fnic->flogi_resp;
	fnic->flogi_resp = NULL;

	fnic->flogi_oxid = FC_XID_UNKNOWN;

	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		/* Must drop back to Eth mode: request a firmware reset,
		 * unregistering our data source MAC first */
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		if (old_flogi) {
			dev_kfree_skb(fp_skb(old_flogi));
			old_flogi = NULL;
		}
		if (old_flogi_resp) {
			dev_kfree_skb(fp_skb(old_flogi_resp));
			old_flogi_resp = NULL;
		}

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/* State changed while unlocked (e.g. reset completed or
		 * another path intervened): redo the whole transition */
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret) {
			fnic->state = old_state;
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(fp_skb(fp));
			goto fnic_send_end;
		}
		/* Cache this FLOGI; it is sent after the reset completes */
		old_flogi = fnic->flogi;
		fnic->flogi = fp;
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		old_flogi_resp = fnic->flogi_resp;
		fnic->flogi_resp = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
		/*
		 * A reset is pending with the firmware. Store the flogi
		 * and its oxid. The transition out of this state happens
		 * only when Firmware completes the reset, either with
		 * success or failed. If success, transition to
		 * FNIC_IN_ETH_MODE, if fail, then transition to
		 * FNIC_IN_FC_MODE
		 */
		fnic->flogi = fp;
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		break;

	case FNIC_IN_ETH_MODE:
		/*
		 * The fw/hw is already in eth mode. Store the oxid,
		 * and send the flogi frame out. The transition out of this
		 * state happens only we receive flogi response from the
		 * network, and the oxid matches the cached oxid when the
		 * flogi frame was sent out. If they match, then we issue
		 * a flogi_reg request and transition to state
		 * FNIC_IN_ETH_TRANS_FC_MODE
		 */
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		ret = fnic_send_frame(fnic, fp);
		break;
	}

fnic_send_end:
	/* Free any stale cached frames collected above */
	if (old_flogi)
		dev_kfree_skb(fp_skb(old_flogi));
	if (old_flogi_resp)
		dev_kfree_skb(fp_skb(old_flogi_resp));
	return ret;
}
  595. static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
  596. struct cq_desc *cq_desc,
  597. struct vnic_wq_buf *buf, void *opaque)
  598. {
  599. struct sk_buff *skb = buf->os_buf;
  600. struct fc_frame *fp = (struct fc_frame *)skb;
  601. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  602. pci_unmap_single(fnic->pdev, buf->dma_addr,
  603. buf->len, PCI_DMA_TODEVICE);
  604. dev_kfree_skb_irq(fp_skb(fp));
  605. buf->os_buf = NULL;
  606. }
  607. static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
  608. struct cq_desc *cq_desc, u8 type,
  609. u16 q_number, u16 completed_index,
  610. void *opaque)
  611. {
  612. struct fnic *fnic = vnic_dev_priv(vdev);
  613. unsigned long flags;
  614. spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
  615. vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
  616. fnic_wq_complete_frame_send, NULL);
  617. spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
  618. return 0;
  619. }
  620. int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
  621. {
  622. unsigned int wq_work_done = 0;
  623. unsigned int i;
  624. for (i = 0; i < fnic->raw_wq_count; i++) {
  625. wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
  626. work_to_do,
  627. fnic_wq_cmpl_handler_cont,
  628. NULL);
  629. }
  630. return wq_work_done;
  631. }
  632. void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
  633. {
  634. struct fc_frame *fp = buf->os_buf;
  635. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  636. pci_unmap_single(fnic->pdev, buf->dma_addr,
  637. buf->len, PCI_DMA_TODEVICE);
  638. dev_kfree_skb(fp_skb(fp));
  639. buf->os_buf = NULL;
  640. }