fnic_fcs.c

/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

/*
 * All-FCF-MACs group address, 01:10:18:01:00:02 (FC-BB-5 FIP), used as the
 * destination for FIP VLAN discovery requests. The listing showed this array
 * uninitialized (all zeros), which would send the request to a zero address;
 * the explicit initializer below restores the standard group MAC.
 */
static u8 fcoe_all_fcfs[ETH_ALEN] = { 0x01, 0x10, 0x18, 0x01, 0x00, 0x02 };

struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

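/**
 * fnic_handle_link() - handle link up/down events.
 * @work: the link_work member of the fnic instance.
 *
 * Compares the cached link state and link-down count against the vNIC
 * device and notifies the FCoE controller of any transition. On link up
 * with FIP enabled, FCoE VLAN discovery is started instead of signalling
 * link up directly.
 */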
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

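/**
 * fnic_fcoe_evlist_free() - free all pending entries on the event list.
 * @fnic: fnic instance.
 */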
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

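/**
 * fnic_handle_event() - service the fnic event list.
 * @work: the event_work member of the fnic instance.
 *
 * Dequeues queued events and dispatches them: VLAN discovery requests are
 * sent with the fnic lock dropped, FCF discovery is started in place.
 * Processing stops early if rx/link events are stopped or the fnic is in
 * a transitional state.
 */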
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is rejected.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame carries an encapsulated FLOGI response that
 * was rejected (LS_RJT) by the switch, typically with an unsupported-command
 * or insufficient-resources explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		shost_printk(KERN_DEBUG, lport->host,
			     " FIP TYPE FLOGI: fab name:%llx "
			     "vfid:%d map:%x\n",
			     fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
			     fip->sel_fcf->fc_map);
		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}

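/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Builds a FIP VLAN request carrying MAC and node-name descriptors,
 * addresses it to the All-FCF-MACs group address, and arms the FIP timer
 * so discovery is retried if no response arrives.
 */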
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there's no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

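/**
 * fnic_fcoe_process_vlan_resp() - process a FIP VLAN discovery response.
 * @fnic: fnic instance.
 * @skb: received FIP VLAN response frame.
 *
 * Collects the VLAN descriptors from the response into fnic->vlans, then
 * selects the first VLAN and starts FCF solicitation on it. If no VLAN
 * descriptor is present, discovery is retried from the FIP timer.
 */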
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

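/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the first known VLAN.
 * @fnic: fnic instance.
 */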
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

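/**
 * fnic_fcoe_vlan_check() - mark the current VLAN as in use.
 * @fnic: fnic instance.
 * @flag: FIP flags from the received advertisement (not examined here).
 *
 * Returns 0 if the VLAN at the head of the list is already used or was
 * solicited (in which case it is promoted to used), -EINVAL otherwise.
 */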
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

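/**
 * fnic_event_enq() - queue an event and schedule the event worker.
 * @fnic: fnic instance.
 * @ev: event to queue.
 */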
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

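/**
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 * @fnic: fnic instance.
 * @skb: received FIP frame, Ethernet header already stripped.
 *
 * Returns 1 if the frame should also be passed on to libfcoe, 0 if it was
 * consumed here as a VLAN discovery response, and -1 on a bad skb.
 * Clear-virtual-link requests additionally trigger a restart of VLAN
 * discovery.
 */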
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

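/**
 * fnic_handle_fip_frame() - service the FIP frame queue.
 * @work: the fip_frame_work member of the fnic instance.
 *
 * Dequeues received FIP frames, lets fnic_fcoe_handle_fip_frame() filter
 * them, restarts VLAN discovery when a FLOGI reject is seen, and hands
 * the remaining frames to the FCoE controller.
 */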
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects, clear all
			 * FCFs and restart from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (!compare_ether_addr(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (compare_ether_addr(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

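/**
 * fnic_rq_cmpl_frame_recv() - process a received frame from the RQ.
 * @rq: receive queue the frame arrived on.
 * @cq_desc: completion descriptor.
 * @buf: receive buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused.
 *
 * Decodes the completion (FCP or raw Ethernet), drops frames with CRC or
 * encapsulation errors, and queues good FC frames for fnic_handle_frame().
 */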
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

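/**
 * fnic_free_rq_buf() - unmap and free a receive buffer.
 * @rq: receive queue the buffer belongs to.
 * @buf: buffer to free.
 */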
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

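/**
 * fnic_free_wq_buf() - unmap and free a transmit buffer.
 * @wq: work queue the buffer belongs to.
 * @buf: buffer to free.
 */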
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

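/**
 * fnic_fcoe_reset_vlans() - discard the discovered VLAN list.
 * @fnic: fnic instance.
 */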
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicate a link down to fcoe so that all FCFs are freed.
	 * This might not be required, since we already did it before
	 * sending the VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

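/**
 * fnic_handle_fip_timer() - FIP timer handler.
 * @fnic: fnic instance.
 *
 * Retries VLAN discovery when no VLANs are known, and re-solicits on the
 * current VLAN while a response is outstanding. A VLAN that exceeds
 * FCOE_CTLR_MAX_SOL solicitations is dropped and the next one is tried.
 */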
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		vlan->sol_count++;
		sol_time = jiffies +
			msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}