  1. /*
  2. * Copyright 2008 Cisco Systems, Inc. All rights reserved.
  3. * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  4. *
  5. * This program is free software; you may redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; version 2 of the License.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. * SOFTWARE.
  17. */
  18. #include <linux/errno.h>
  19. #include <linux/pci.h>
  20. #include <linux/slab.h>
  21. #include <linux/skbuff.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/if_ether.h>
  25. #include <linux/if_vlan.h>
  26. #include <linux/workqueue.h>
  27. #include <scsi/fc/fc_fip.h>
  28. #include <scsi/fc/fc_els.h>
  29. #include <scsi/fc/fc_fcoe.h>
  30. #include <scsi/fc_frame.h>
  31. #include <scsi/libfc.h>
  32. #include "fnic_io.h"
  33. #include "fnic.h"
  34. #include "fnic_fip.h"
  35. #include "cq_enet_desc.h"
  36. #include "cq_exch_desc.h"
  37. static u8 fcoe_all_fcfs[ETH_ALEN];
  38. struct workqueue_struct *fnic_fip_queue;
  39. struct workqueue_struct *fnic_event_queue;
  40. static void fnic_set_eth_mode(struct fnic *);
  41. static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
  42. static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
  43. static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
  44. static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
  45. static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
/*
 * fnic_handle_link() - worker that propagates link state changes to libfcoe.
 * @work: embedded link_work member of the fnic.
 *
 * Reads the current link status and link-down count from the vNIC device
 * and, based on the old/new combination (UP->UP, UP->DOWN, DOWN->UP,
 * UP->DOWN->UP bounce), notifies the FCoE controller.  When the firmware
 * is FIP-capable, a link-up transition (re)starts FCoE VLAN discovery
 * instead of declaring the controller link up directly.
 *
 * fnic_lock is held while fnic state is read/updated and always released
 * before calling into libfcoe or sending the VLAN request.
 */
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* Shutting down: ignore any further link events */
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			/*
			 * Link is up both before and after, but a changed
			 * down-count means it bounced while we weren't
			 * looking: treat as a full down/up cycle.
			 */
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}
/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	/* Drain the receive queue one frame at a time */
	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/* Shutting down: free the frame and stop processing */
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		/* Stable state: hand the frame to libfc's exchange layer */
		fc_exch_recv(lp, fp);
	}
}
  135. void fnic_fcoe_evlist_free(struct fnic *fnic)
  136. {
  137. struct fnic_event *fevt = NULL;
  138. struct fnic_event *next = NULL;
  139. unsigned long flags;
  140. spin_lock_irqsave(&fnic->fnic_lock, flags);
  141. if (list_empty(&fnic->evlist)) {
  142. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  143. return;
  144. }
  145. list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
  146. list_del(&fevt->list);
  147. kfree(fevt);
  148. }
  149. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  150. }
/*
 * fnic_handle_event() - worker that services the fnic event list.
 * @work: embedded event_work member of the fnic.
 *
 * Drains fnic->evlist under fnic_lock and dispatches each event.  Bails
 * out (freeing the current entry) once stop_rx_link_events is set, and
 * leaves the remaining events queued while the fnic is in a transitional
 * state so they are serviced after the state settles.
 */
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			/*
			 * NOTE(review): fnic_lock is dropped here in the
			 * middle of list_for_each_entry_safe; "next" could
			 * in principle be freed by a concurrent
			 * fnic_fcoe_evlist_free().  Confirm callers cannot
			 * race this path.
			 */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
  199. /**
  200. * Check if the Received FIP FLOGI frame is rejected
  201. * @fip: The FCoE controller that received the frame
  202. * @skb: The received FIP frame
  203. *
  204. * Returns non-zero if the frame is rejected with unsupported cmd with
  205. * insufficient resource els explanation.
  206. */
  207. static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
  208. struct sk_buff *skb)
  209. {
  210. struct fc_lport *lport = fip->lp;
  211. struct fip_header *fiph;
  212. struct fc_frame_header *fh = NULL;
  213. struct fip_desc *desc;
  214. struct fip_encaps *els;
  215. enum fip_desc_type els_dtype = 0;
  216. u16 op;
  217. u8 els_op;
  218. u8 sub;
  219. size_t els_len = 0;
  220. size_t rlen;
  221. size_t dlen = 0;
  222. if (skb_linearize(skb))
  223. return 0;
  224. if (skb->len < sizeof(*fiph))
  225. return 0;
  226. fiph = (struct fip_header *)skb->data;
  227. op = ntohs(fiph->fip_op);
  228. sub = fiph->fip_subcode;
  229. if (op != FIP_OP_LS)
  230. return 0;
  231. if (sub != FIP_SC_REP)
  232. return 0;
  233. rlen = ntohs(fiph->fip_dl_len) * 4;
  234. if (rlen + sizeof(*fiph) > skb->len)
  235. return 0;
  236. desc = (struct fip_desc *)(fiph + 1);
  237. dlen = desc->fip_dlen * FIP_BPW;
  238. if (desc->fip_dtype == FIP_DT_FLOGI) {
  239. shost_printk(KERN_DEBUG, lport->host,
  240. " FIP TYPE FLOGI: fab name:%llx "
  241. "vfid:%d map:%x\n",
  242. fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
  243. fip->sel_fcf->fc_map);
  244. if (dlen < sizeof(*els) + sizeof(*fh) + 1)
  245. return 0;
  246. els_len = dlen - sizeof(*els);
  247. els = (struct fip_encaps *)desc;
  248. fh = (struct fc_frame_header *)(els + 1);
  249. els_dtype = desc->fip_dtype;
  250. if (!fh)
  251. return 0;
  252. /*
  253. * ELS command code, reason and explanation should be = Reject,
  254. * unsupported command and insufficient resource
  255. */
  256. els_op = *(u8 *)(fh + 1);
  257. if (els_op == ELS_LS_RJT) {
  258. shost_printk(KERN_INFO, lport->host,
  259. "Flogi Request Rejected by Switch\n");
  260. return 1;
  261. }
  262. shost_printk(KERN_INFO, lport->host,
  263. "Flogi Request Accepted by Switch\n");
  264. }
  265. return 0;
  266. }
/*
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Resets any previously learned VLANs, builds a FIP VLAN request
 * carrying a MAC descriptor (our control source address) and a NAME
 * descriptor (the local port WWNN), transmits it via the fcoe_ctlr send
 * hook, and arms fip_timer so the request is retried if no response
 * arrives within FCOE_CTLR_FIPVLAN_TOV.
 */
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	/* Discard stale VLANs and clear the currently programmed VLAN */
	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;
	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	/* Destination is the all-FCFs multicast MAC */
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	/* MAC descriptor: identifies this ENode to the FCF */
	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	/* NAME descriptor: the local port's node WWN */
	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
  310. static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
  311. {
  312. struct fcoe_ctlr *fip = &fnic->ctlr;
  313. struct fip_header *fiph;
  314. struct fip_desc *desc;
  315. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  316. u16 vid;
  317. size_t rlen;
  318. size_t dlen;
  319. struct fcoe_vlan *vlan;
  320. u64 sol_time;
  321. unsigned long flags;
  322. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  323. "Received VLAN response...\n");
  324. fiph = (struct fip_header *) skb->data;
  325. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  326. "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
  327. ntohs(fiph->fip_op), fiph->fip_subcode);
  328. rlen = ntohs(fiph->fip_dl_len) * 4;
  329. fnic_fcoe_reset_vlans(fnic);
  330. spin_lock_irqsave(&fnic->vlans_lock, flags);
  331. desc = (struct fip_desc *)(fiph + 1);
  332. while (rlen > 0) {
  333. dlen = desc->fip_dlen * FIP_BPW;
  334. switch (desc->fip_dtype) {
  335. case FIP_DT_VLAN:
  336. vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
  337. shost_printk(KERN_INFO, fnic->lport->host,
  338. "process_vlan_resp: FIP VLAN %d\n", vid);
  339. vlan = kmalloc(sizeof(*vlan),
  340. GFP_ATOMIC);
  341. if (!vlan) {
  342. /* retry from timer */
  343. spin_unlock_irqrestore(&fnic->vlans_lock,
  344. flags);
  345. goto out;
  346. }
  347. memset(vlan, 0, sizeof(struct fcoe_vlan));
  348. vlan->vid = vid & 0x0fff;
  349. vlan->state = FIP_VLAN_AVAIL;
  350. list_add_tail(&vlan->list, &fnic->vlans);
  351. break;
  352. }
  353. desc = (struct fip_desc *)((char *)desc + dlen);
  354. rlen -= dlen;
  355. }
  356. /* any VLAN descriptors present ? */
  357. if (list_empty(&fnic->vlans)) {
  358. /* retry from timer */
  359. atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
  360. FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
  361. "No VLAN descriptors in FIP VLAN response\n");
  362. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  363. goto out;
  364. }
  365. vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
  366. fnic->set_vlan(fnic, vlan->vid);
  367. vlan->state = FIP_VLAN_SENT; /* sent now */
  368. vlan->sol_count++;
  369. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  370. /* start the solicitation */
  371. fcoe_ctlr_link_up(fip);
  372. sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
  373. mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
  374. out:
  375. return;
  376. }
  377. static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
  378. {
  379. unsigned long flags;
  380. struct fcoe_vlan *vlan;
  381. u64 sol_time;
  382. spin_lock_irqsave(&fnic->vlans_lock, flags);
  383. vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
  384. fnic->set_vlan(fnic, vlan->vid);
  385. vlan->state = FIP_VLAN_SENT; /* sent now */
  386. vlan->sol_count = 1;
  387. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  388. /* start the solicitation */
  389. fcoe_ctlr_link_up(&fnic->ctlr);
  390. sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
  391. mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
  392. }
  393. static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
  394. {
  395. unsigned long flags;
  396. struct fcoe_vlan *fvlan;
  397. spin_lock_irqsave(&fnic->vlans_lock, flags);
  398. if (list_empty(&fnic->vlans)) {
  399. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  400. return -EINVAL;
  401. }
  402. fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
  403. if (fvlan->state == FIP_VLAN_USED) {
  404. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  405. return 0;
  406. }
  407. if (fvlan->state == FIP_VLAN_SENT) {
  408. fvlan->state = FIP_VLAN_USED;
  409. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  410. return 0;
  411. }
  412. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  413. return -EINVAL;
  414. }
  415. static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
  416. {
  417. struct fnic_event *fevt;
  418. unsigned long flags;
  419. fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
  420. if (!fevt)
  421. return;
  422. fevt->fnic = fnic;
  423. fevt->event = ev;
  424. spin_lock_irqsave(&fnic->fnic_lock, flags);
  425. list_add_tail(&fevt->list, &fnic->evlist);
  426. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  427. schedule_work(&fnic->event_work);
  428. }
  429. static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
  430. {
  431. struct fip_header *fiph;
  432. int ret = 1;
  433. u16 op;
  434. u8 sub;
  435. if (!skb || !(skb->data))
  436. return -1;
  437. if (skb_linearize(skb))
  438. goto drop;
  439. fiph = (struct fip_header *)skb->data;
  440. op = ntohs(fiph->fip_op);
  441. sub = fiph->fip_subcode;
  442. if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
  443. goto drop;
  444. if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
  445. goto drop;
  446. if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
  447. if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
  448. goto drop;
  449. /* pass it on to fcoe */
  450. ret = 1;
  451. } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
  452. /* set the vlan as used */
  453. fnic_fcoe_process_vlan_resp(fnic, skb);
  454. ret = 0;
  455. } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
  456. /* received CVL request, restart vlan disc */
  457. fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
  458. /* pass it on to fcoe */
  459. ret = 1;
  460. }
  461. drop:
  462. return ret;
  463. }
  464. void fnic_handle_fip_frame(struct work_struct *work)
  465. {
  466. struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
  467. struct fnic_stats *fnic_stats = &fnic->fnic_stats;
  468. unsigned long flags;
  469. struct sk_buff *skb;
  470. struct ethhdr *eh;
  471. while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
  472. spin_lock_irqsave(&fnic->fnic_lock, flags);
  473. if (fnic->stop_rx_link_events) {
  474. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  475. dev_kfree_skb(skb);
  476. return;
  477. }
  478. /*
  479. * If we're in a transitional state, just re-queue and return.
  480. * The queue will be serviced when we get to a stable state.
  481. */
  482. if (fnic->state != FNIC_IN_FC_MODE &&
  483. fnic->state != FNIC_IN_ETH_MODE) {
  484. skb_queue_head(&fnic->fip_frame_queue, skb);
  485. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  486. return;
  487. }
  488. spin_unlock_irqrestore(&fnic->fnic_lock, flags);
  489. eh = (struct ethhdr *)skb->data;
  490. if (eh->h_proto == htons(ETH_P_FIP)) {
  491. skb_pull(skb, sizeof(*eh));
  492. if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
  493. dev_kfree_skb(skb);
  494. continue;
  495. }
  496. /*
  497. * If there's FLOGI rejects - clear all
  498. * fcf's & restart from scratch
  499. */
  500. if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
  501. atomic64_inc(
  502. &fnic_stats->vlan_stats.flogi_rejects);
  503. shost_printk(KERN_INFO, fnic->lport->host,
  504. "Trigger a Link down - VLAN Disc\n");
  505. fcoe_ctlr_link_down(&fnic->ctlr);
  506. /* start FCoE VLAN discovery */
  507. fnic_fcoe_send_vlan_req(fnic);
  508. dev_kfree_skb(skb);
  509. continue;
  510. }
  511. fcoe_ctlr_recv(&fnic->ctlr, skb);
  512. continue;
  513. }
  514. }
  515. }
  516. /**
  517. * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  518. * @fnic: fnic instance.
  519. * @skb: Ethernet Frame.
  520. */
  521. static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
  522. {
  523. struct fc_frame *fp;
  524. struct ethhdr *eh;
  525. struct fcoe_hdr *fcoe_hdr;
  526. struct fcoe_crc_eof *ft;
  527. /*
  528. * Undo VLAN encapsulation if present.
  529. */
  530. eh = (struct ethhdr *)skb->data;
  531. if (eh->h_proto == htons(ETH_P_8021Q)) {
  532. memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
  533. eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
  534. skb_reset_mac_header(skb);
  535. }
  536. if (eh->h_proto == htons(ETH_P_FIP)) {
  537. if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
  538. printk(KERN_ERR "Dropped FIP frame, as firmware "
  539. "uses non-FIP mode, Enable FIP "
  540. "using UCSM\n");
  541. goto drop;
  542. }
  543. skb_queue_tail(&fnic->fip_frame_queue, skb);
  544. queue_work(fnic_fip_queue, &fnic->fip_frame_work);
  545. return 1; /* let caller know packet was used */
  546. }
  547. if (eh->h_proto != htons(ETH_P_FCOE))
  548. goto drop;
  549. skb_set_network_header(skb, sizeof(*eh));
  550. skb_pull(skb, sizeof(*eh));
  551. fcoe_hdr = (struct fcoe_hdr *)skb->data;
  552. if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
  553. goto drop;
  554. fp = (struct fc_frame *)skb;
  555. fc_frame_init(fp);
  556. fr_sof(fp) = fcoe_hdr->fcoe_sof;
  557. skb_pull(skb, sizeof(struct fcoe_hdr));
  558. skb_reset_transport_header(skb);
  559. ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
  560. fr_eof(fp) = ft->fcoe_eof;
  561. skb_trim(skb, skb->len - sizeof(*ft));
  562. return 0;
  563. drop:
  564. dev_kfree_skb_irq(skb);
  565. return -1;
  566. }
  567. /**
  568. * fnic_update_mac_locked() - set data MAC address and filters.
  569. * @fnic: fnic instance.
  570. * @new: newly-assigned FCoE MAC address.
  571. *
  572. * Called with the fnic lock held.
  573. */
  574. void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
  575. {
  576. u8 *ctl = fnic->ctlr.ctl_src_addr;
  577. u8 *data = fnic->data_src_addr;
  578. if (is_zero_ether_addr(new))
  579. new = ctl;
  580. if (ether_addr_equal(data, new))
  581. return;
  582. FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
  583. if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
  584. vnic_dev_del_addr(fnic->vdev, data);
  585. memcpy(data, new, ETH_ALEN);
  586. if (!ether_addr_equal(new, ctl))
  587. vnic_dev_add_addr(fnic->vdev, new);
  588. }
/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 *
 * Locked wrapper around fnic_update_mac_locked().
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}
/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		/* Registration failed: roll the state back to Ethernet mode */
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}
/*
 * fnic_rq_cmpl_frame_recv() - process one completed receive descriptor.
 * @rq: the vNIC receive queue the buffer came from.
 * @cq_desc: raw completion descriptor from the hardware.
 * @buf: RQ buffer bookkeeping entry (holds the sk_buff and DMA mapping).
 * @skipped: unused.
 * @opaque: unused.
 *
 * Unmaps the DMA buffer, decodes the completion (either an FCP descriptor
 * with Ethernet headers already stripped by hardware, or an ENET
 * descriptor carrying a raw Ethernet frame), validates the error flags,
 * and queues good FC frames for fnic_handle_frame().  FIP/raw Ethernet
 * frames are diverted by fnic_import_rq_eth_pkt(); anything invalid is
 * freed.
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* Hardware stripped the Ethernet/FCoE encapsulation */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* Raw Ethernet frame: decode, then strip headers in software */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		/* Non-zero return: frame was a FIP frame (consumed) or freed */
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Hand the validated FC frame to the frame worker */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
/*
 * fnic_rq_cmpl_handler_cont() - per-descriptor callback for CQ servicing.
 *
 * Invoked by vnic_cq_service() for each completed RQ descriptor; forwards
 * the completion to fnic_rq_cmpl_frame_recv() via vnic_rq_service().
 * Always returns 0 (continue servicing).
 */
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}
  767. int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
  768. {
  769. unsigned int tot_rq_work_done = 0, cur_work_done;
  770. unsigned int i;
  771. int err;
  772. for (i = 0; i < fnic->rq_count; i++) {
  773. cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
  774. fnic_rq_cmpl_handler_cont,
  775. NULL);
  776. if (cur_work_done) {
  777. err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
  778. if (err)
  779. shost_printk(KERN_ERR, fnic->lport->host,
  780. "fnic_alloc_rq_frame can't alloc"
  781. " frame\n");
  782. }
  783. tot_rq_work_done += cur_work_done;
  784. }
  785. return tot_rq_work_done;
  786. }
  787. /*
  788. * This function is called once at init time to allocate and fill RQ
  789. * buffers. Subsequently, it is called in the interrupt context after RQ
  790. * buffer processing to replenish the buffers in the RQ
  791. */
  792. int fnic_alloc_rq_frame(struct vnic_rq *rq)
  793. {
  794. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  795. struct sk_buff *skb;
  796. u16 len;
  797. dma_addr_t pa;
  798. len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
  799. skb = dev_alloc_skb(len);
  800. if (!skb) {
  801. FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
  802. "Unable to allocate RQ sk_buff\n");
  803. return -ENOMEM;
  804. }
  805. skb_reset_mac_header(skb);
  806. skb_reset_transport_header(skb);
  807. skb_reset_network_header(skb);
  808. skb_put(skb, len);
  809. pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
  810. fnic_queue_rq_desc(rq, skb, pa, len);
  811. return 0;
  812. }
  813. void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
  814. {
  815. struct fc_frame *fp = buf->os_buf;
  816. struct fnic *fnic = vnic_dev_priv(rq->vdev);
  817. pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
  818. PCI_DMA_FROMDEVICE);
  819. dev_kfree_skb(fp_skb(fp));
  820. buf->os_buf = NULL;
  821. }
  822. /**
  823. * fnic_eth_send() - Send Ethernet frame.
  824. * @fip: fcoe_ctlr instance.
  825. * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
  826. */
  827. void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  828. {
  829. struct fnic *fnic = fnic_from_ctlr(fip);
  830. struct vnic_wq *wq = &fnic->wq[0];
  831. dma_addr_t pa;
  832. struct ethhdr *eth_hdr;
  833. struct vlan_ethhdr *vlan_hdr;
  834. unsigned long flags;
  835. if (!fnic->vlan_hw_insert) {
  836. eth_hdr = (struct ethhdr *)skb_mac_header(skb);
  837. vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
  838. sizeof(*vlan_hdr) - sizeof(*eth_hdr));
  839. memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
  840. vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
  841. vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
  842. vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
  843. }
  844. pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
  845. spin_lock_irqsave(&fnic->wq_lock[0], flags);
  846. if (!vnic_wq_desc_avail(wq)) {
  847. pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
  848. spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
  849. kfree_skb(skb);
  850. return;
  851. }
  852. fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
  853. 0 /* hw inserts cos value */,
  854. fnic->vlan_id, 1);
  855. spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
  856. }
  857. /*
  858. * Send FC frame.
  859. */
  860. static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
  861. {
  862. struct vnic_wq *wq = &fnic->wq[0];
  863. struct sk_buff *skb;
  864. dma_addr_t pa;
  865. struct ethhdr *eth_hdr;
  866. struct vlan_ethhdr *vlan_hdr;
  867. struct fcoe_hdr *fcoe_hdr;
  868. struct fc_frame_header *fh;
  869. u32 tot_len, eth_hdr_len;
  870. int ret = 0;
  871. unsigned long flags;
  872. fh = fc_frame_header_get(fp);
  873. skb = fp_skb(fp);
  874. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
  875. fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
  876. return 0;
  877. if (!fnic->vlan_hw_insert) {
  878. eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
  879. vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
  880. eth_hdr = (struct ethhdr *)vlan_hdr;
  881. vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
  882. vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
  883. vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
  884. fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
  885. } else {
  886. eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
  887. eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
  888. eth_hdr->h_proto = htons(ETH_P_FCOE);
  889. fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
  890. }
  891. if (fnic->ctlr.map_dest)
  892. fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
  893. else
  894. memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
  895. memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
  896. tot_len = skb->len;
  897. BUG_ON(tot_len % 4);
  898. memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
  899. fcoe_hdr->fcoe_sof = fr_sof(fp);
  900. if (FC_FCOE_VER)
  901. FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
  902. pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
  903. spin_lock_irqsave(&fnic->wq_lock[0], flags);
  904. if (!vnic_wq_desc_avail(wq)) {
  905. pci_unmap_single(fnic->pdev, pa,
  906. tot_len, PCI_DMA_TODEVICE);
  907. ret = -1;
  908. goto fnic_send_frame_end;
  909. }
  910. fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
  911. 0 /* hw inserts cos value */,
  912. fnic->vlan_id, 1, 1, 1);
  913. fnic_send_frame_end:
  914. spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
  915. if (ret)
  916. dev_kfree_skb_any(fp_skb(fp));
  917. return ret;
  918. }
/*
 * fnic_send
 * Routine to send a raw frame
 *
 * libfc transmit entry point. Drops the frame during device removal,
 * queues it while the adapter is transitioning between FC and Ethernet
 * modes (fnic_flush_tx() sends it later), and otherwise transmits it
 * immediately via fnic_send_frame().
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	/* NOTE(review): in_remove is read without holding fnic_lock —
	 * presumably the removal path quiesces transmits first; confirm. */
	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		/* Park the frame until a stable mode is reached. */
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}
  944. /**
  945. * fnic_flush_tx() - send queued frames.
  946. * @fnic: fnic device
  947. *
  948. * Send frames that were waiting to go out in FC or Ethernet mode.
  949. * Whenever changing modes we purge queued frames, so these frames should
  950. * be queued for the stable mode that we're in, either FC or Ethernet.
  951. *
  952. * Called without fnic_lock held.
  953. */
  954. void fnic_flush_tx(struct fnic *fnic)
  955. {
  956. struct sk_buff *skb;
  957. struct fc_frame *fp;
  958. while ((skb = skb_dequeue(&fnic->tx_queue))) {
  959. fp = (struct fc_frame *)skb;
  960. fnic_send_frame(fnic, fp);
  961. }
  962. }
/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 *
 * Requests a firmware reset to move to Ethernet mode. Because the lock
 * must be dropped around the (potentially slow) reset request, the state
 * is re-checked afterwards and the transition retried if another path
 * changed it in the meantime.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		/* Drop the lock around the firmware reset request. */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/* State changed while unlocked: re-evaluate from the top. */
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		/* Reset request failed: roll the state back. */
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		/* Already in, or already heading to, Ethernet mode. */
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
  996. static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
  997. struct cq_desc *cq_desc,
  998. struct vnic_wq_buf *buf, void *opaque)
  999. {
  1000. struct sk_buff *skb = buf->os_buf;
  1001. struct fc_frame *fp = (struct fc_frame *)skb;
  1002. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  1003. pci_unmap_single(fnic->pdev, buf->dma_addr,
  1004. buf->len, PCI_DMA_TODEVICE);
  1005. dev_kfree_skb_irq(fp_skb(fp));
  1006. buf->os_buf = NULL;
  1007. }
  1008. static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
  1009. struct cq_desc *cq_desc, u8 type,
  1010. u16 q_number, u16 completed_index,
  1011. void *opaque)
  1012. {
  1013. struct fnic *fnic = vnic_dev_priv(vdev);
  1014. unsigned long flags;
  1015. spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
  1016. vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
  1017. fnic_wq_complete_frame_send, NULL);
  1018. spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
  1019. return 0;
  1020. }
  1021. int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
  1022. {
  1023. unsigned int wq_work_done = 0;
  1024. unsigned int i;
  1025. for (i = 0; i < fnic->raw_wq_count; i++) {
  1026. wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
  1027. work_to_do,
  1028. fnic_wq_cmpl_handler_cont,
  1029. NULL);
  1030. }
  1031. return wq_work_done;
  1032. }
  1033. void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
  1034. {
  1035. struct fc_frame *fp = buf->os_buf;
  1036. struct fnic *fnic = vnic_dev_priv(wq->vdev);
  1037. pci_unmap_single(fnic->pdev, buf->dma_addr,
  1038. buf->len, PCI_DMA_TODEVICE);
  1039. dev_kfree_skb(fp_skb(fp));
  1040. buf->os_buf = NULL;
  1041. }
  1042. void fnic_fcoe_reset_vlans(struct fnic *fnic)
  1043. {
  1044. unsigned long flags;
  1045. struct fcoe_vlan *vlan;
  1046. struct fcoe_vlan *next;
  1047. /*
  1048. * indicate a link down to fcoe so that all fcf's are free'd
  1049. * might not be required since we did this before sending vlan
  1050. * discovery request
  1051. */
  1052. spin_lock_irqsave(&fnic->vlans_lock, flags);
  1053. if (!list_empty(&fnic->vlans)) {
  1054. list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
  1055. list_del(&vlan->list);
  1056. kfree(vlan);
  1057. }
  1058. }
  1059. spin_unlock_irqrestore(&fnic->vlans_lock, flags);
  1060. }
/*
 * FIP timer expiry handler: drives the FIP VLAN discovery state machine.
 *
 * Inspects the head of the discovered-VLAN list and, depending on its
 * state, leaves it alone (in use), restarts VLAN discovery (failed or
 * list empty), or retries/advances solicitation (no response yet).
 */
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		/* Teardown in progress: do nothing. */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* VLAN discovery only applies when FIP is in use. */
	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	/* Each case below is responsible for releasing vlans_lock. */
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* NOTE(review): vlan is dereferenced after vlans_lock is
		 * released — looks racy with list teardown; confirm. */
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		/* Re-arm the timer for the next solicitation attempt. */
		sol_time = jiffies + msecs_to_jiffies
					(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}