bnx2fc_fcoe.c

  1. /* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
  2. * This file contains the code that interacts with libfc, libfcoe,
  3. * cnic modules to create FCoE instances, send/receive non-offloaded
  4. * FIP/FCoE packets, listen to link events etc.
  5. *
  6. * Copyright (c) 2008 - 2010 Broadcom Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation.
  11. *
  12. * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  13. */
  14. #include "bnx2fc.h"
  15. static struct list_head adapter_list;
  16. static u32 adapter_count;
  17. static DEFINE_MUTEX(bnx2fc_dev_lock);
  18. DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
  19. #define DRV_MODULE_NAME "bnx2fc"
  20. #define DRV_MODULE_VERSION BNX2FC_VERSION
  21. #define DRV_MODULE_RELDATE "May 27, 2011"
  22. static char version[] __devinitdata =
  23. "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
  24. " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  25. MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
  26. MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
  27. MODULE_LICENSE("GPL");
  28. MODULE_VERSION(DRV_MODULE_VERSION);
  29. #define BNX2FC_MAX_QUEUE_DEPTH 256
  30. #define BNX2FC_MIN_QUEUE_DEPTH 32
  31. #define FCOE_WORD_TO_BYTE 4
  32. static struct scsi_transport_template *bnx2fc_transport_template;
  33. static struct scsi_transport_template *bnx2fc_vport_xport_template;
  34. struct workqueue_struct *bnx2fc_wq;
35. /* bnx2fc needs only one instance of the fcoe_percpu_s structure.
36. * The IO threads are per-CPU, but there is only one L2 receive thread.
37. */
  38. struct fcoe_percpu_s bnx2fc_global;
  39. DEFINE_SPINLOCK(bnx2fc_global_lock);
  40. static struct cnic_ulp_ops bnx2fc_cnic_cb;
  41. static struct libfc_function_template bnx2fc_libfc_fcn_templ;
  42. static struct scsi_host_template bnx2fc_shost_template;
  43. static struct fc_function_template bnx2fc_transport_function;
  44. static struct fc_function_template bnx2fc_vport_xport_function;
  45. static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
  46. static int bnx2fc_destroy(struct net_device *net_device);
  47. static int bnx2fc_enable(struct net_device *netdev);
  48. static int bnx2fc_disable(struct net_device *netdev);
  49. static void bnx2fc_recv_frame(struct sk_buff *skb);
  50. static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
  51. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
  52. static int bnx2fc_net_config(struct fc_lport *lp);
  53. static int bnx2fc_lport_config(struct fc_lport *lport);
  54. static int bnx2fc_em_config(struct fc_lport *lport);
  55. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
  56. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
  57. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
  58. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
  59. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
  60. struct device *parent, int npiv);
  61. static void bnx2fc_destroy_work(struct work_struct *work);
  62. static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
  63. static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
  64. static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
  65. static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
  66. static void bnx2fc_port_shutdown(struct fc_lport *lport);
  67. static void bnx2fc_stop(struct bnx2fc_hba *hba);
  68. static int __init bnx2fc_mod_init(void);
  69. static void __exit bnx2fc_mod_exit(void);
  70. unsigned int bnx2fc_debug_level;
  71. module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
  72. static int bnx2fc_cpu_callback(struct notifier_block *nfb,
  73. unsigned long action, void *hcpu);
  74. /* notification function for CPU hotplug events */
  75. static struct notifier_block bnx2fc_cpu_notifier = {
  76. .notifier_call = bnx2fc_cpu_callback,
  77. };
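/**
 * bnx2fc_clean_rx_queue - purge queued L2 frames for an lport
 *
 * @lp: the fc_lport whose pending receive frames should be dropped
 *
 * Walks the global FCoE receive list and frees any skb that was queued
 * for @lp, so the L2 receive thread never hands frames to a port that
 * is being torn down.
 */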
  78. static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
  79. {
  80. struct fcoe_percpu_s *bg;
  81. struct fcoe_rcv_info *fr;
  82. struct sk_buff_head *list;
  83. struct sk_buff *skb, *next;
  84. struct sk_buff *head;
  85. bg = &bnx2fc_global;
  86. spin_lock_bh(&bg->fcoe_rx_list.lock);
  87. list = &bg->fcoe_rx_list;
  88. head = list->next;
  89. for (skb = head; skb != (struct sk_buff *)list;
  90. skb = next) {
  91. next = skb->next;
  92. fr = fcoe_dev_from_skb(skb);
  93. if (fr->fr_dev == lp) {
  94. __skb_unlink(skb, list);
  95. kfree_skb(skb);
  96. }
  97. }
  98. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  99. }
  100. int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  101. {
  102. int rc;
  103. spin_lock(&bnx2fc_global_lock);
  104. rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
  105. spin_unlock(&bnx2fc_global_lock);
  106. return rc;
  107. }
  108. static void bnx2fc_abort_io(struct fc_lport *lport)
  109. {
  110. /*
111. * This function is a no-op for bnx2fc, but we do
  112. * not want to leave it as NULL either, as libfc
  113. * can call the default function which is
  114. * fc_fcp_abort_io.
  115. */
  116. }
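/**
 * bnx2fc_cleanup - flush active IOs on all sessions of an lport
 *
 * @lport: local port (physical port or vport) being cleaned up
 *
 * Scans the hba's offloaded target list and flushes the active IOs of
 * every session that belongs to @lport's fcoe_port.
 */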
  117. static void bnx2fc_cleanup(struct fc_lport *lport)
  118. {
  119. struct fcoe_port *port = lport_priv(lport);
  120. struct bnx2fc_hba *hba = port->priv;
  121. struct bnx2fc_rport *tgt;
  122. int i;
  123. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  124. mutex_lock(&hba->hba_mutex);
  125. spin_lock_bh(&hba->hba_lock);
  126. for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
  127. tgt = hba->tgt_ofld_list[i];
  128. if (tgt) {
  129. /* Cleanup IOs belonging to requested vport */
  130. if (tgt->port == port) {
  131. spin_unlock_bh(&hba->hba_lock);
  132. BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
  133. bnx2fc_flush_active_ios(tgt);
  134. spin_lock_bh(&hba->hba_lock);
  135. }
  136. }
  137. }
  138. spin_unlock_bh(&hba->hba_lock);
  139. mutex_unlock(&hba->hba_mutex);
  140. }
  141. static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
  142. struct fc_frame *fp)
  143. {
  144. struct fc_rport_priv *rdata = tgt->rdata;
  145. struct fc_frame_header *fh;
  146. int rc = 0;
  147. fh = fc_frame_header_get(fp);
  148. BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
  149. "r_ctl = 0x%x\n", rdata->ids.port_id,
  150. ntohs(fh->fh_ox_id), fh->fh_r_ctl);
  151. if ((fh->fh_type == FC_TYPE_ELS) &&
  152. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  153. switch (fc_frame_payload_op(fp)) {
  154. case ELS_ADISC:
  155. rc = bnx2fc_send_adisc(tgt, fp);
  156. break;
  157. case ELS_LOGO:
  158. rc = bnx2fc_send_logo(tgt, fp);
  159. break;
  160. case ELS_RLS:
  161. rc = bnx2fc_send_rls(tgt, fp);
  162. break;
  163. default:
  164. break;
  165. }
  166. } else if ((fh->fh_type == FC_TYPE_BLS) &&
  167. (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
  168. BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
  169. else {
  170. BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
  171. "rctl 0x%x thru non-offload path\n",
  172. fh->fh_type, fh->fh_r_ctl);
  173. return -ENODEV;
  174. }
  175. if (rc)
  176. return -ENOMEM;
  177. else
  178. return 0;
  179. }
  180. /**
  181. * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
  182. *
  183. * @lport: the associated local port
  184. * @fp: the fc_frame to be transmitted
  185. */
  186. static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
  187. {
  188. struct ethhdr *eh;
  189. struct fcoe_crc_eof *cp;
  190. struct sk_buff *skb;
  191. struct fc_frame_header *fh;
  192. struct bnx2fc_hba *hba;
  193. struct fcoe_port *port;
  194. struct fcoe_hdr *hp;
  195. struct bnx2fc_rport *tgt;
  196. struct fcoe_dev_stats *stats;
  197. u8 sof, eof;
  198. u32 crc;
  199. unsigned int hlen, tlen, elen;
  200. int wlen, rc = 0;
  201. port = (struct fcoe_port *)lport_priv(lport);
  202. hba = port->priv;
  203. fh = fc_frame_header_get(fp);
  204. skb = fp_skb(fp);
  205. if (!lport->link_up) {
  206. BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
  207. kfree_skb(skb);
  208. return 0;
  209. }
  210. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  211. if (!hba->ctlr.sel_fcf) {
  212. BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
  213. kfree_skb(skb);
  214. return -EINVAL;
  215. }
  216. if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
  217. return 0;
  218. }
  219. sof = fr_sof(fp);
  220. eof = fr_eof(fp);
  221. /*
  222. * Snoop the frame header to check if the frame is for
  223. * an offloaded session
  224. */
225. /*
226. * tgt_ofld_list access is synchronized using
227. * both the hba mutex and the hba lock. At least one
228. * of them must be held for read access.
229. */
  230. spin_lock_bh(&hba->hba_lock);
  231. tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
  232. if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
  233. /* This frame is for offloaded session */
  234. BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
  235. "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
  236. spin_unlock_bh(&hba->hba_lock);
  237. rc = bnx2fc_xmit_l2_frame(tgt, fp);
  238. if (rc != -ENODEV) {
  239. kfree_skb(skb);
  240. return rc;
  241. }
  242. } else {
  243. spin_unlock_bh(&hba->hba_lock);
  244. }
  245. elen = sizeof(struct ethhdr);
  246. hlen = sizeof(struct fcoe_hdr);
  247. tlen = sizeof(struct fcoe_crc_eof);
  248. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  249. skb->ip_summed = CHECKSUM_NONE;
  250. crc = fcoe_fc_crc(fp);
  251. /* copy port crc and eof to the skb buff */
  252. if (skb_is_nonlinear(skb)) {
  253. skb_frag_t *frag;
  254. if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
  255. kfree_skb(skb);
  256. return -ENOMEM;
  257. }
  258. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  259. cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
  260. + frag->page_offset;
  261. } else {
  262. cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
  263. }
  264. memset(cp, 0, sizeof(*cp));
  265. cp->fcoe_eof = eof;
  266. cp->fcoe_crc32 = cpu_to_le32(~crc);
  267. if (skb_is_nonlinear(skb)) {
  268. kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
  269. cp = NULL;
  270. }
  271. /* adjust skb network/transport offsets to match mac/fcoe/port */
  272. skb_push(skb, elen + hlen);
  273. skb_reset_mac_header(skb);
  274. skb_reset_network_header(skb);
  275. skb->mac_len = elen;
  276. skb->protocol = htons(ETH_P_FCOE);
  277. skb->dev = hba->netdev;
  278. /* fill up mac and fcoe headers */
  279. eh = eth_hdr(skb);
  280. eh->h_proto = htons(ETH_P_FCOE);
  281. if (hba->ctlr.map_dest)
  282. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  283. else
  284. /* insert GW address */
  285. memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
  286. if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
  287. memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
  288. else
  289. memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
  290. hp = (struct fcoe_hdr *)(eh + 1);
  291. memset(hp, 0, sizeof(*hp));
  292. if (FC_FCOE_VER)
  293. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  294. hp->fcoe_sof = sof;
  295. /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
  296. if (lport->seq_offload && fr_max_payload(fp)) {
  297. skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
  298. skb_shinfo(skb)->gso_size = fr_max_payload(fp);
  299. } else {
  300. skb_shinfo(skb)->gso_type = 0;
  301. skb_shinfo(skb)->gso_size = 0;
  302. }
303. /* update tx stats */
  304. stats = per_cpu_ptr(lport->dev_stats, get_cpu());
  305. stats->TxFrames++;
  306. stats->TxWords += wlen;
  307. put_cpu();
  308. /* send down to lld */
  309. fr_dev(fp) = lport;
  310. if (port->fcoe_pending_queue.qlen)
  311. fcoe_check_wait_queue(lport, skb);
  312. else if (fcoe_start_io(skb))
  313. fcoe_check_wait_queue(lport, skb);
  314. return 0;
  315. }
  316. /**
  317. * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
  318. *
  319. * @skb: the receive socket buffer
  320. * @dev: associated net device
321. * @ptype: the &packet_type structure which was used to register this handler
322. * @olddev: the original receive &net_device, in case @dev is a bond
323. *
324. * Receives the packet and queues it to the L2 receive thread, which builds the FC frame and passes it up
  325. */
  326. static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
  327. struct packet_type *ptype, struct net_device *olddev)
  328. {
  329. struct fc_lport *lport;
  330. struct bnx2fc_hba *hba;
  331. struct fc_frame_header *fh;
  332. struct fcoe_rcv_info *fr;
  333. struct fcoe_percpu_s *bg;
  334. unsigned short oxid;
  335. hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
  336. lport = hba->ctlr.lp;
  337. if (unlikely(lport == NULL)) {
  338. printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
  339. goto err;
  340. }
  341. if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
  342. printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
  343. goto err;
  344. }
  345. /*
  346. * Check for minimum frame length, and make sure required FCoE
  347. * and FC headers are pulled into the linear data area.
  348. */
  349. if (unlikely((skb->len < FCOE_MIN_FRAME) ||
  350. !pskb_may_pull(skb, FCOE_HEADER_LEN)))
  351. goto err;
  352. skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
  353. fh = (struct fc_frame_header *) skb_transport_header(skb);
  354. oxid = ntohs(fh->fh_ox_id);
  355. fr = fcoe_dev_from_skb(skb);
  356. fr->fr_dev = lport;
  357. fr->ptype = ptype;
  358. bg = &bnx2fc_global;
  359. spin_lock_bh(&bg->fcoe_rx_list.lock);
  360. __skb_queue_tail(&bg->fcoe_rx_list, skb);
  361. if (bg->fcoe_rx_list.qlen == 1)
  362. wake_up_process(bg->thread);
  363. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  364. return 0;
  365. err:
  366. kfree_skb(skb);
  367. return -1;
  368. }
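/**
 * bnx2fc_l2_rcv_thread - kthread that drains the global L2 receive queue
 *
 * @arg: pointer to the shared fcoe_percpu_s structure (bnx2fc_global)
 *
 * bnx2fc_rcv() only queues the skb and wakes this thread; the FC frame is
 * then processed in thread context by bnx2fc_recv_frame().
 *
 * Editor's sketch (assumption, not code from this excerpt): the module init
 * path presumably creates this thread along these lines:
 *
 *	struct task_struct *l2_thread;
 *
 *	skb_queue_head_init(&bnx2fc_global.fcoe_rx_list);
 *	l2_thread = kthread_create(bnx2fc_l2_rcv_thread, &bnx2fc_global,
 *				   "bnx2fc_l2_thread");
 *	if (!IS_ERR(l2_thread)) {
 *		bnx2fc_global.thread = l2_thread;
 *		wake_up_process(l2_thread);
 *	}
 */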
  369. static int bnx2fc_l2_rcv_thread(void *arg)
  370. {
  371. struct fcoe_percpu_s *bg = arg;
  372. struct sk_buff *skb;
  373. set_user_nice(current, -20);
  374. set_current_state(TASK_INTERRUPTIBLE);
  375. while (!kthread_should_stop()) {
  376. schedule();
  377. spin_lock_bh(&bg->fcoe_rx_list.lock);
  378. while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
  379. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  380. bnx2fc_recv_frame(skb);
  381. spin_lock_bh(&bg->fcoe_rx_list.lock);
  382. }
  383. __set_current_state(TASK_INTERRUPTIBLE);
  384. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  385. }
  386. __set_current_state(TASK_RUNNING);
  387. return 0;
  388. }
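/**
 * bnx2fc_recv_frame - validate a non-offloaded FCoE frame and pass it to libfc
 *
 * @skb: the received socket buffer
 *
 * Strips the FCoE header, verifies the trailing CRC/EOF, drops FCP data
 * frames and non-FIP LOGOs that must not take the L2 path, and finally
 * hands the frame to fc_exch_recv().
 */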
  389. static void bnx2fc_recv_frame(struct sk_buff *skb)
  390. {
  391. u32 fr_len;
  392. struct fc_lport *lport;
  393. struct fcoe_rcv_info *fr;
  394. struct fcoe_dev_stats *stats;
  395. struct fc_frame_header *fh;
  396. struct fcoe_crc_eof crc_eof;
  397. struct fc_frame *fp;
  398. struct fc_lport *vn_port;
  399. struct fcoe_port *port;
  400. u8 *mac = NULL;
  401. u8 *dest_mac = NULL;
  402. struct fcoe_hdr *hp;
  403. fr = fcoe_dev_from_skb(skb);
  404. lport = fr->fr_dev;
  405. if (unlikely(lport == NULL)) {
  406. printk(KERN_ALERT PFX "Invalid lport struct\n");
  407. kfree_skb(skb);
  408. return;
  409. }
  410. if (skb_is_nonlinear(skb))
  411. skb_linearize(skb);
  412. mac = eth_hdr(skb)->h_source;
  413. dest_mac = eth_hdr(skb)->h_dest;
  414. /* Pull the header */
  415. hp = (struct fcoe_hdr *) skb_network_header(skb);
  416. fh = (struct fc_frame_header *) skb_transport_header(skb);
  417. skb_pull(skb, sizeof(struct fcoe_hdr));
  418. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  419. stats = per_cpu_ptr(lport->dev_stats, get_cpu());
  420. stats->RxFrames++;
  421. stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
  422. fp = (struct fc_frame *)skb;
  423. fc_frame_init(fp);
  424. fr_dev(fp) = lport;
  425. fr_sof(fp) = hp->fcoe_sof;
  426. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  427. put_cpu();
  428. kfree_skb(skb);
  429. return;
  430. }
  431. fr_eof(fp) = crc_eof.fcoe_eof;
  432. fr_crc(fp) = crc_eof.fcoe_crc32;
  433. if (pskb_trim(skb, fr_len)) {
  434. put_cpu();
  435. kfree_skb(skb);
  436. return;
  437. }
  438. fh = fc_frame_header_get(fp);
  439. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  440. if (vn_port) {
  441. port = lport_priv(vn_port);
  442. if (compare_ether_addr(port->data_src_addr, dest_mac)
  443. != 0) {
  444. BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
  445. put_cpu();
  446. kfree_skb(skb);
  447. return;
  448. }
  449. }
  450. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  451. fh->fh_type == FC_TYPE_FCP) {
452. /* Drop FCP data. We don't handle it in the L2 path */
  453. put_cpu();
  454. kfree_skb(skb);
  455. return;
  456. }
  457. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  458. fh->fh_type == FC_TYPE_ELS) {
  459. switch (fc_frame_payload_op(fp)) {
  460. case ELS_LOGO:
  461. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  462. /* drop non-FIP LOGO */
  463. put_cpu();
  464. kfree_skb(skb);
  465. return;
  466. }
  467. break;
  468. }
  469. }
  470. if (le32_to_cpu(fr_crc(fp)) !=
  471. ~crc32(~0, skb->data, fr_len)) {
  472. if (stats->InvalidCRCCount < 5)
  473. printk(KERN_WARNING PFX "dropping frame with "
  474. "CRC error\n");
  475. stats->InvalidCRCCount++;
  476. put_cpu();
  477. kfree_skb(skb);
  478. return;
  479. }
  480. put_cpu();
  481. fc_exch_recv(lport, fp);
  482. }
  483. /**
484. * bnx2fc_percpu_io_thread - per-CPU thread that processes IO completions
  485. *
  486. * @arg: ptr to bnx2fc_percpu_info structure
  487. */
  488. int bnx2fc_percpu_io_thread(void *arg)
  489. {
  490. struct bnx2fc_percpu_s *p = arg;
  491. struct bnx2fc_work *work, *tmp;
  492. LIST_HEAD(work_list);
  493. set_user_nice(current, -20);
  494. set_current_state(TASK_INTERRUPTIBLE);
  495. while (!kthread_should_stop()) {
  496. schedule();
  497. spin_lock_bh(&p->fp_work_lock);
  498. while (!list_empty(&p->work_list)) {
  499. list_splice_init(&p->work_list, &work_list);
  500. spin_unlock_bh(&p->fp_work_lock);
  501. list_for_each_entry_safe(work, tmp, &work_list, list) {
  502. list_del_init(&work->list);
  503. bnx2fc_process_cq_compl(work->tgt, work->wqe);
  504. kfree(work);
  505. }
  506. spin_lock_bh(&p->fp_work_lock);
  507. }
  508. __set_current_state(TASK_INTERRUPTIBLE);
  509. spin_unlock_bh(&p->fp_work_lock);
  510. }
  511. __set_current_state(TASK_RUNNING);
  512. return 0;
  513. }
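/*
 * Editor's note: the producer side of the above consumer lives in the CQE
 * handling path (not in this file). Based on the fields used here, queueing
 * a work item is expected to look roughly like the sketch below; the per-cpu
 * lookup and the "iothread" field name are assumptions for illustration:
 *
 *	struct bnx2fc_percpu_s *p = &per_cpu(bnx2fc_percpu, cpu);
 *	struct bnx2fc_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *
 *	if (work) {
 *		INIT_LIST_HEAD(&work->list);
 *		work->tgt = tgt;
 *		work->wqe = wqe;
 *		spin_lock_bh(&p->fp_work_lock);
 *		list_add_tail(&work->list, &p->work_list);
 *		spin_unlock_bh(&p->fp_work_lock);
 *		wake_up_process(p->iothread);
 *	}
 */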
  514. static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
  515. {
  516. struct fc_host_statistics *bnx2fc_stats;
  517. struct fc_lport *lport = shost_priv(shost);
  518. struct fcoe_port *port = lport_priv(lport);
  519. struct bnx2fc_hba *hba = port->priv;
  520. struct fcoe_statistics_params *fw_stats;
  521. int rc = 0;
  522. fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
  523. if (!fw_stats)
  524. return NULL;
  525. bnx2fc_stats = fc_get_host_stats(shost);
  526. init_completion(&hba->stat_req_done);
  527. if (bnx2fc_send_stat_req(hba))
  528. return bnx2fc_stats;
  529. rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
  530. if (!rc) {
  531. BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
  532. return bnx2fc_stats;
  533. }
  534. bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
  535. bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
  536. bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
  537. bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
  538. bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
  539. bnx2fc_stats->dumped_frames = 0;
  540. bnx2fc_stats->lip_count = 0;
  541. bnx2fc_stats->nos_count = 0;
  542. bnx2fc_stats->loss_of_sync_count = 0;
  543. bnx2fc_stats->loss_of_signal_count = 0;
  544. bnx2fc_stats->prim_seq_protocol_err_count = 0;
  545. return bnx2fc_stats;
  546. }
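/**
 * bnx2fc_shost_config - configure the Scsi_Host and register it with SCSI-ml
 *
 * @lport: local port whose Scsi_Host is being configured
 * @dev: parent device for the SCSI host in sysfs
 */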
  547. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
  548. {
  549. struct fcoe_port *port = lport_priv(lport);
  550. struct bnx2fc_hba *hba = port->priv;
  551. struct Scsi_Host *shost = lport->host;
  552. int rc = 0;
  553. shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
  554. shost->max_lun = BNX2FC_MAX_LUN;
  555. shost->max_id = BNX2FC_MAX_FCP_TGT;
  556. shost->max_channel = 0;
  557. if (lport->vport)
  558. shost->transportt = bnx2fc_vport_xport_template;
  559. else
  560. shost->transportt = bnx2fc_transport_template;
  561. /* Add the new host to SCSI-ml */
  562. rc = scsi_add_host(lport->host, dev);
  563. if (rc) {
  564. printk(KERN_ERR PFX "Error on scsi_add_host\n");
  565. return rc;
  566. }
  567. if (!lport->vport)
  568. fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
  569. sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
  570. BNX2FC_NAME, BNX2FC_VERSION,
  571. hba->netdev->name);
  572. return 0;
  573. }
  574. static void bnx2fc_link_speed_update(struct fc_lport *lport)
  575. {
  576. struct fcoe_port *port = lport_priv(lport);
  577. struct bnx2fc_hba *hba = port->priv;
  578. struct net_device *netdev = hba->netdev;
  579. struct ethtool_cmd ecmd;
  580. if (!dev_ethtool_get_settings(netdev, &ecmd)) {
  581. lport->link_supported_speeds &=
  582. ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
  583. if (ecmd.supported & (SUPPORTED_1000baseT_Half |
  584. SUPPORTED_1000baseT_Full))
  585. lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
  586. if (ecmd.supported & SUPPORTED_10000baseT_Full)
  587. lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
  588. switch (ethtool_cmd_speed(&ecmd)) {
  589. case SPEED_1000:
  590. lport->link_speed = FC_PORTSPEED_1GBIT;
  591. break;
  592. case SPEED_2500:
  593. lport->link_speed = FC_PORTSPEED_2GBIT;
  594. break;
  595. case SPEED_10000:
  596. lport->link_speed = FC_PORTSPEED_10GBIT;
  597. break;
  598. }
  599. }
  600. }
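/**
 * bnx2fc_link_ok - check the carrier state of the underlying net_device
 *
 * @lport: local port to check
 *
 * Returns: 0 if the physical device is up and has carrier, -1 otherwise.
 * Also updates the ADAPTER_STATE_LINK_DOWN bit accordingly.
 */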
  601. static int bnx2fc_link_ok(struct fc_lport *lport)
  602. {
  603. struct fcoe_port *port = lport_priv(lport);
  604. struct bnx2fc_hba *hba = port->priv;
  605. struct net_device *dev = hba->phys_dev;
  606. int rc = 0;
  607. if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
  608. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  609. else {
  610. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  611. rc = -1;
  612. }
  613. return rc;
  614. }
  615. /**
  616. * bnx2fc_get_link_state - get network link state
  617. *
  618. * @hba: adapter instance pointer
  619. *
  620. * updates adapter structure flag based on netdev state
  621. */
  622. void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
  623. {
  624. if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
  625. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  626. else
  627. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  628. }
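/**
 * bnx2fc_net_config - configure netdev-related properties of the lport
 *
 * @lport: local port to configure
 *
 * Sets the MFS, initializes the pending-frame queue and timer, updates the
 * link speed, and derives the WWNN/WWPN from the SAN MAC address for
 * physical (non-NPIV) ports.
 */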
  629. static int bnx2fc_net_config(struct fc_lport *lport)
  630. {
  631. struct bnx2fc_hba *hba;
  632. struct fcoe_port *port;
  633. u64 wwnn, wwpn;
  634. port = lport_priv(lport);
  635. hba = port->priv;
  636. /* require support for get_pauseparam ethtool op. */
  637. if (!hba->phys_dev->ethtool_ops ||
  638. !hba->phys_dev->ethtool_ops->get_pauseparam)
  639. return -EOPNOTSUPP;
  640. if (fc_set_mfs(lport, BNX2FC_MFS))
  641. return -EINVAL;
  642. skb_queue_head_init(&port->fcoe_pending_queue);
  643. port->fcoe_pending_queue_active = 0;
  644. setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
  645. bnx2fc_link_speed_update(lport);
  646. if (!lport->vport) {
  647. wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
  648. BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
  649. fc_set_wwnn(lport, wwnn);
  650. wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
  651. BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
  652. fc_set_wwpn(lport, wwpn);
  653. }
  654. return 0;
  655. }
  656. static void bnx2fc_destroy_timer(unsigned long data)
  657. {
  658. struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
  659. BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
  660. "Destroy compl not received!!\n");
  661. hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
  662. wake_up_interruptible(&hba->destroy_wait);
  663. }
  664. /**
  665. * bnx2fc_indicate_netevent - Generic netdev event handler
  666. *
  667. * @context: adapter structure pointer
  668. * @event: event type
  669. *
670. * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
  671. * NETDEV_CHANGE_MTU events
  672. */
  673. static void bnx2fc_indicate_netevent(void *context, unsigned long event)
  674. {
  675. struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
  676. struct fc_lport *lport = hba->ctlr.lp;
  677. struct fc_lport *vport;
  678. u32 link_possible = 1;
  679. if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
  680. BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
  681. hba->netdev->name, event);
  682. return;
  683. }
  684. /*
  685. * ASSUMPTION:
  686. * indicate_netevent cannot be called from cnic unless bnx2fc
  687. * does register_device
  688. */
  689. BUG_ON(!lport);
  690. BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
  691. hba->netdev->name, event);
  692. switch (event) {
  693. case NETDEV_UP:
  694. BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
  695. hba->adapter_state);
  696. if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
  697. printk(KERN_ERR "indicate_netevent: "\
  698. "adapter is not UP!!\n");
  699. break;
  700. case NETDEV_DOWN:
  701. BNX2FC_HBA_DBG(lport, "Port down\n");
  702. clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  703. clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
  704. link_possible = 0;
  705. break;
  706. case NETDEV_GOING_DOWN:
  707. BNX2FC_HBA_DBG(lport, "Port going down\n");
  708. set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  709. link_possible = 0;
  710. break;
  711. case NETDEV_CHANGE:
  712. BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
  713. break;
  714. default:
715. printk(KERN_ERR PFX "Unknown netevent %ld\n", event);
  716. return;
  717. }
  718. bnx2fc_link_speed_update(lport);
  719. if (link_possible && !bnx2fc_link_ok(lport)) {
  720. printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
  721. fcoe_ctlr_link_up(&hba->ctlr);
  722. } else {
  723. printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
  724. if (fcoe_ctlr_link_down(&hba->ctlr)) {
  725. clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
  726. mutex_lock(&lport->lp_mutex);
  727. list_for_each_entry(vport, &lport->vports, list)
  728. fc_host_port_type(vport->host) =
  729. FC_PORTTYPE_UNKNOWN;
  730. mutex_unlock(&lport->lp_mutex);
  731. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  732. per_cpu_ptr(lport->dev_stats,
  733. get_cpu())->LinkFailureCount++;
  734. put_cpu();
  735. fcoe_clean_pending_queue(lport);
  736. init_waitqueue_head(&hba->shutdown_wait);
  737. BNX2FC_HBA_DBG(lport, "indicate_netevent "
  738. "num_ofld_sess = %d\n",
  739. hba->num_ofld_sess);
  740. hba->wait_for_link_down = 1;
  741. BNX2FC_HBA_DBG(lport, "waiting for uploads to "
  742. "compl proc = %s\n",
  743. current->comm);
  744. wait_event_interruptible(hba->shutdown_wait,
  745. (hba->num_ofld_sess == 0));
  746. BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
  747. hba->num_ofld_sess);
  748. hba->wait_for_link_down = 0;
  749. if (signal_pending(current))
  750. flush_signals(current);
  751. }
  752. }
  753. }
  754. static int bnx2fc_libfc_config(struct fc_lport *lport)
  755. {
  756. /* Set the function pointers set by bnx2fc driver */
  757. memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
  758. sizeof(struct libfc_function_template));
  759. fc_elsct_init(lport);
  760. fc_exch_init(lport);
  761. fc_rport_init(lport);
  762. fc_disc_init(lport);
  763. return 0;
  764. }
  765. static int bnx2fc_em_config(struct fc_lport *lport)
  766. {
  767. struct fcoe_port *port = lport_priv(lport);
  768. struct bnx2fc_hba *hba = port->priv;
  769. if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
  770. FCOE_MAX_XID, NULL)) {
  771. printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
  772. return -ENOMEM;
  773. }
  774. hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
  775. BNX2FC_MAX_XID);
  776. if (!hba->cmd_mgr) {
  777. printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
  778. fc_exch_mgr_free(lport);
  779. return -ENOMEM;
  780. }
  781. return 0;
  782. }
  783. static int bnx2fc_lport_config(struct fc_lport *lport)
  784. {
  785. lport->link_up = 0;
  786. lport->qfull = 0;
  787. lport->max_retry_count = 3;
  788. lport->max_rport_retry_count = 3;
  789. lport->e_d_tov = 2 * 1000;
  790. lport->r_a_tov = 10 * 1000;
  791. /* REVISIT: enable when supporting tape devices
  792. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  793. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  794. */
  795. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
  796. lport->does_npiv = 1;
  797. memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
  798. lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
  799. /* alloc stats structure */
  800. if (fc_lport_init_stats(lport))
  801. return -ENOMEM;
  802. /* Finish fc_lport configuration */
  803. fc_lport_config(lport);
  804. return 0;
  805. }
  806. /**
  807. * bnx2fc_fip_recv - handle a received FIP frame.
  808. *
  809. * @skb: the received skb
  810. * @dev: associated &net_device
  811. * @ptype: the &packet_type structure which was used to register this handler.
812. * @orig_dev: original receive &net_device, in case @dev is a bond.
  813. *
  814. * Returns: 0 for success
  815. */
  816. static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
  817. struct packet_type *ptype,
  818. struct net_device *orig_dev)
  819. {
  820. struct bnx2fc_hba *hba;
  821. hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
  822. fcoe_ctlr_recv(&hba->ctlr, skb);
  823. return 0;
  824. }
  825. /**
826. * bnx2fc_update_src_mac - update the Ethernet source MAC for an lport
827. *
828. * @lport: local port whose data source address is being updated
829. * @addr: unicast MAC address to use as the new source address
830. *
831. * Called via the fcoe_ctlr update_mac hook when the FCoE MAC (e.g. the
832. * granted FPMA) changes; the address is saved in the fcoe_port and used
833. * for outgoing non-offloaded frames.
  834. */
  835. static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
  836. {
  837. struct fcoe_port *port = lport_priv(lport);
  838. memcpy(port->data_src_addr, addr, ETH_ALEN);
  839. }
  840. /**
  841. * bnx2fc_get_src_mac - return the ethernet source address for an lport
  842. *
  843. * @lport: libfc port
  844. */
  845. static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
  846. {
  847. struct fcoe_port *port;
  848. port = (struct fcoe_port *)lport_priv(lport);
  849. return port->data_src_addr;
  850. }
  851. /**
  852. * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
  853. *
  854. * @fip: FCoE controller.
  855. * @skb: FIP Packet.
  856. */
  857. static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  858. {
  859. skb->dev = bnx2fc_from_ctlr(fip)->netdev;
  860. dev_queue_xmit(skb);
  861. }
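/**
 * bnx2fc_vport_create - create an NPIV vport on top of the physical lport
 *
 * @vport: fc_vport created by the FC transport class
 * @disabled: if true, leave the new vport in the disabled state
 *
 * Returns: 0 for success, -EIO if firmware init has not completed or the
 * vport lport could not be created.
 */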
  862. static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
  863. {
  864. struct Scsi_Host *shost = vport_to_shost(vport);
  865. struct fc_lport *n_port = shost_priv(shost);
  866. struct fcoe_port *port = lport_priv(n_port);
  867. struct bnx2fc_hba *hba = port->priv;
  868. struct net_device *netdev = hba->netdev;
  869. struct fc_lport *vn_port;
  870. if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
871. printk(KERN_ERR PFX "vn ports cannot be created on "
872. "this hba\n");
  873. return -EIO;
  874. }
  875. mutex_lock(&bnx2fc_dev_lock);
  876. vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
  877. mutex_unlock(&bnx2fc_dev_lock);
  878. if (IS_ERR(vn_port)) {
  879. printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
  880. netdev->name);
  881. return -EIO;
  882. }
  883. if (disabled) {
  884. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  885. } else {
  886. vn_port->boot_time = jiffies;
  887. fc_lport_init(vn_port);
  888. fc_fabric_login(vn_port);
  889. fc_vport_setlink(vn_port);
  890. }
  891. return 0;
  892. }
  893. static int bnx2fc_vport_destroy(struct fc_vport *vport)
  894. {
  895. struct Scsi_Host *shost = vport_to_shost(vport);
  896. struct fc_lport *n_port = shost_priv(shost);
  897. struct fc_lport *vn_port = vport->dd_data;
  898. struct fcoe_port *port = lport_priv(vn_port);
  899. mutex_lock(&n_port->lp_mutex);
  900. list_del(&vn_port->list);
  901. mutex_unlock(&n_port->lp_mutex);
  902. queue_work(bnx2fc_wq, &port->destroy_work);
  903. return 0;
  904. }
  905. static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
  906. {
  907. struct fc_lport *lport = vport->dd_data;
  908. if (disable) {
  909. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  910. fc_fabric_logoff(lport);
  911. } else {
  912. lport->boot_time = jiffies;
  913. fc_fabric_login(lport);
  914. fc_vport_setlink(lport);
  915. }
  916. return 0;
  917. }
  918. static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
  919. {
  920. struct net_device *netdev = hba->netdev;
  921. struct net_device *physdev = hba->phys_dev;
  922. struct netdev_hw_addr *ha;
  923. int sel_san_mac = 0;
  924. /* setup Source MAC Address */
  925. rcu_read_lock();
  926. for_each_dev_addr(physdev, ha) {
  927. BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
  928. ha->type);
  929. printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
  930. ha->addr[1], ha->addr[2], ha->addr[3],
  931. ha->addr[4], ha->addr[5]);
  932. if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
  933. (is_valid_ether_addr(ha->addr))) {
  934. memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
  935. sel_san_mac = 1;
  936. BNX2FC_MISC_DBG("Found SAN MAC\n");
  937. }
  938. }
  939. rcu_read_unlock();
  940. if (!sel_san_mac)
  941. return -ENODEV;
  942. hba->fip_packet_type.func = bnx2fc_fip_recv;
  943. hba->fip_packet_type.type = htons(ETH_P_FIP);
  944. hba->fip_packet_type.dev = netdev;
  945. dev_add_pack(&hba->fip_packet_type);
  946. hba->fcoe_packet_type.func = bnx2fc_rcv;
  947. hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
  948. hba->fcoe_packet_type.dev = netdev;
  949. dev_add_pack(&hba->fcoe_packet_type);
  950. return 0;
  951. }
  952. static int bnx2fc_attach_transport(void)
  953. {
  954. bnx2fc_transport_template =
  955. fc_attach_transport(&bnx2fc_transport_function);
  956. if (bnx2fc_transport_template == NULL) {
  957. printk(KERN_ERR PFX "Failed to attach FC transport\n");
  958. return -ENODEV;
  959. }
  960. bnx2fc_vport_xport_template =
  961. fc_attach_transport(&bnx2fc_vport_xport_function);
  962. if (bnx2fc_vport_xport_template == NULL) {
  963. printk(KERN_ERR PFX
  964. "Failed to attach FC transport for vport\n");
  965. fc_release_transport(bnx2fc_transport_template);
  966. bnx2fc_transport_template = NULL;
  967. return -ENODEV;
  968. }
  969. return 0;
  970. }
  971. static void bnx2fc_release_transport(void)
  972. {
  973. fc_release_transport(bnx2fc_transport_template);
  974. fc_release_transport(bnx2fc_vport_xport_template);
  975. bnx2fc_transport_template = NULL;
  976. bnx2fc_vport_xport_template = NULL;
  977. }
  978. static void bnx2fc_interface_release(struct kref *kref)
  979. {
  980. struct bnx2fc_hba *hba;
  981. struct net_device *netdev;
  982. struct net_device *phys_dev;
  983. hba = container_of(kref, struct bnx2fc_hba, kref);
  984. BNX2FC_MISC_DBG("Interface is being released\n");
  985. netdev = hba->netdev;
  986. phys_dev = hba->phys_dev;
  987. /* tear-down FIP controller */
  988. if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
  989. fcoe_ctlr_destroy(&hba->ctlr);
  990. /* Free the command manager */
  991. if (hba->cmd_mgr) {
  992. bnx2fc_cmd_mgr_free(hba->cmd_mgr);
  993. hba->cmd_mgr = NULL;
  994. }
  995. dev_put(netdev);
  996. module_put(THIS_MODULE);
  997. }
  998. static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
  999. {
  1000. kref_get(&hba->kref);
  1001. }
  1002. static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
  1003. {
  1004. kref_put(&hba->kref, bnx2fc_interface_release);
  1005. }
  1006. static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
  1007. {
  1008. bnx2fc_unbind_pcidev(hba);
  1009. kfree(hba);
  1010. }
  1011. /**
  1012. * bnx2fc_interface_create - create a new fcoe instance
  1013. *
  1014. * @cnic: pointer to cnic device
  1015. *
1016. * Creates a new FCoE instance on the given device, which includes allocating
1017. * the hba structure and binding it to the underlying PCI device.
  1018. */
  1019. static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
  1020. {
  1021. struct bnx2fc_hba *hba;
  1022. int rc;
  1023. hba = kzalloc(sizeof(*hba), GFP_KERNEL);
  1024. if (!hba) {
  1025. printk(KERN_ERR PFX "Unable to allocate hba structure\n");
  1026. return NULL;
  1027. }
  1028. spin_lock_init(&hba->hba_lock);
  1029. mutex_init(&hba->hba_mutex);
  1030. hba->cnic = cnic;
  1031. rc = bnx2fc_bind_pcidev(hba);
  1032. if (rc)
  1033. goto bind_err;
  1034. hba->phys_dev = cnic->netdev;
  1035. /* will get overwritten after we do vlan discovery */
  1036. hba->netdev = hba->phys_dev;
  1037. init_waitqueue_head(&hba->shutdown_wait);
  1038. init_waitqueue_head(&hba->destroy_wait);
  1039. return hba;
  1040. bind_err:
  1041. printk(KERN_ERR PFX "create_interface: bind error\n");
  1042. kfree(hba);
  1043. return NULL;
  1044. }
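/**
 * bnx2fc_interface_setup - set up FIP and L2 packet handlers for the hba
 *
 * @hba: adapter instance
 * @fip_mode: FIP mode used to initialize the FCoE controller
 *
 * Initializes the fcoe_ctlr, hooks up the bnx2fc FIP send/MAC callbacks,
 * and registers the FIP and FCoE packet types on the netdev.
 */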
  1045. static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
  1046. enum fip_state fip_mode)
  1047. {
  1048. int rc = 0;
  1049. struct net_device *netdev = hba->netdev;
  1050. struct fcoe_ctlr *fip = &hba->ctlr;
  1051. dev_hold(netdev);
  1052. kref_init(&hba->kref);
  1053. hba->flags = 0;
  1054. /* Initialize FIP */
  1055. memset(fip, 0, sizeof(*fip));
  1056. fcoe_ctlr_init(fip, fip_mode);
  1057. hba->ctlr.send = bnx2fc_fip_send;
  1058. hba->ctlr.update_mac = bnx2fc_update_src_mac;
  1059. hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
  1060. set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
  1061. INIT_LIST_HEAD(&hba->vports);
  1062. rc = bnx2fc_netdev_setup(hba);
  1063. if (rc)
  1064. goto setup_err;
  1065. hba->next_conn_id = 0;
  1066. memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
  1067. hba->num_ofld_sess = 0;
  1068. return 0;
  1069. setup_err:
  1070. fcoe_ctlr_destroy(&hba->ctlr);
  1071. dev_put(netdev);
  1072. bnx2fc_interface_put(hba);
  1073. return rc;
  1074. }
  1075. /**
  1076. * bnx2fc_if_create - Create FCoE instance on a given interface
  1077. *
  1078. * @hba: FCoE interface to create a local port on
  1079. * @parent: Device pointer to be the parent in sysfs for the SCSI host
  1080. * @npiv: Indicates if the port is vport or not
  1081. *
1082. * Creates a fc_lport instance and a Scsi_Host instance and configures them.
  1083. *
  1084. * Returns: Allocated fc_lport or an error pointer
  1085. */
  1086. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
  1087. struct device *parent, int npiv)
  1088. {
  1089. struct fc_lport *lport, *n_port;
  1090. struct fcoe_port *port;
  1091. struct Scsi_Host *shost;
  1092. struct fc_vport *vport = dev_to_vport(parent);
  1093. struct bnx2fc_lport *blport;
  1094. int rc = 0;
  1095. blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
  1096. if (!blport) {
  1097. BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
  1098. return NULL;
  1099. }
  1100. /* Allocate Scsi_Host structure */
  1101. if (!npiv)
  1102. lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
  1103. else
  1104. lport = libfc_vport_create(vport, sizeof(*port));
  1105. if (!lport) {
  1106. printk(KERN_ERR PFX "could not allocate scsi host structure\n");
  1107. goto free_blport;
  1108. }
  1109. shost = lport->host;
  1110. port = lport_priv(lport);
  1111. port->lport = lport;
  1112. port->priv = hba;
  1113. INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
  1114. /* Configure fcoe_port */
  1115. rc = bnx2fc_lport_config(lport);
  1116. if (rc)
  1117. goto lp_config_err;
  1118. if (npiv) {
  1119. printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
  1120. vport->node_name, vport->port_name);
  1121. fc_set_wwnn(lport, vport->node_name);
  1122. fc_set_wwpn(lport, vport->port_name);
  1123. }
  1124. /* Configure netdev and networking properties of the lport */
  1125. rc = bnx2fc_net_config(lport);
  1126. if (rc) {
  1127. printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
  1128. goto lp_config_err;
  1129. }
  1130. rc = bnx2fc_shost_config(lport, parent);
  1131. if (rc) {
1132. printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
  1133. hba->netdev->name);
  1134. goto lp_config_err;
  1135. }
  1136. /* Initialize the libfc library */
  1137. rc = bnx2fc_libfc_config(lport);
  1138. if (rc) {
1139. printk(KERN_ERR PFX "Couldn't configure libfc\n");
  1140. goto shost_err;
  1141. }
  1142. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  1143. /* Allocate exchange manager */
  1144. if (!npiv)
  1145. rc = bnx2fc_em_config(lport);
  1146. else {
  1147. shost = vport_to_shost(vport);
  1148. n_port = shost_priv(shost);
  1149. rc = fc_exch_mgr_list_clone(n_port, lport);
  1150. }
  1151. if (rc) {
  1152. printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
  1153. goto shost_err;
  1154. }
  1155. bnx2fc_interface_get(hba);
  1156. spin_lock_bh(&hba->hba_lock);
  1157. blport->lport = lport;
  1158. list_add_tail(&blport->list, &hba->vports);
  1159. spin_unlock_bh(&hba->hba_lock);
  1160. return lport;
  1161. shost_err:
  1162. scsi_remove_host(shost);
  1163. lp_config_err:
  1164. scsi_host_put(lport->host);
  1165. free_blport:
  1166. kfree(blport);
  1167. return NULL;
  1168. }
  1169. static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
  1170. {
1171. /* Don't listen for Ethernet packets anymore */
  1172. __dev_remove_pack(&hba->fcoe_packet_type);
  1173. __dev_remove_pack(&hba->fip_packet_type);
  1174. synchronize_net();
  1175. }
  1176. static void bnx2fc_if_destroy(struct fc_lport *lport)
  1177. {
  1178. struct fcoe_port *port = lport_priv(lport);
  1179. struct bnx2fc_hba *hba = port->priv;
  1180. struct bnx2fc_lport *blport, *tmp;
  1181. BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
  1182. /* Stop the transmit retry timer */
  1183. del_timer_sync(&port->timer);
  1184. /* Free existing transmit skbs */
  1185. fcoe_clean_pending_queue(lport);
  1186. /* Free queued packets for the receive thread */
  1187. bnx2fc_clean_rx_queue(lport);
  1188. /* Detach from scsi-ml */
  1189. fc_remove_host(lport->host);
  1190. scsi_remove_host(lport->host);
1191. /*
1192. * Note that only the physical lport has an exchange manager;
1193. * for vports, this call is a no-op.
1194. */
  1195. fc_exch_mgr_free(lport);
  1196. /* Free memory used by statistical counters */
  1197. fc_lport_free_stats(lport);
  1198. spin_lock_bh(&hba->hba_lock);
  1199. list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
  1200. if (blport->lport == lport) {
  1201. list_del(&blport->list);
  1202. kfree(blport);
  1203. }
  1204. }
  1205. spin_unlock_bh(&hba->hba_lock);
  1206. /* Release Scsi_Host */
  1207. scsi_host_put(lport->host);
  1208. bnx2fc_interface_put(hba);
  1209. }
  1210. /**
  1211. * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
  1212. *
1213. * @netdev: The net_device on which the FCoE interface was created
1214. *
1215. * Called to tear down the FCoE instance that was created on
1216. * @netdev.
  1217. *
  1218. * Returns: 0 for success
  1219. */
  1220. static int bnx2fc_destroy(struct net_device *netdev)
  1221. {
  1222. struct bnx2fc_hba *hba = NULL;
  1223. struct net_device *phys_dev;
  1224. int rc = 0;
  1225. rtnl_lock();
  1226. mutex_lock(&bnx2fc_dev_lock);
  1227. /* obtain physical netdev */
  1228. if (netdev->priv_flags & IFF_802_1Q_VLAN)
  1229. phys_dev = vlan_dev_real_dev(netdev);
  1230. else {
  1231. printk(KERN_ERR PFX "Not a vlan device\n");
  1232. rc = -ENODEV;
  1233. goto netdev_err;
  1234. }
  1235. hba = bnx2fc_hba_lookup(phys_dev);
  1236. if (!hba || !hba->ctlr.lp) {
  1237. rc = -ENODEV;
  1238. printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
  1239. goto netdev_err;
  1240. }
  1241. if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
  1242. printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
  1243. goto netdev_err;
  1244. }
  1245. bnx2fc_netdev_cleanup(hba);
  1246. bnx2fc_stop(hba);
  1247. bnx2fc_if_destroy(hba->ctlr.lp);
  1248. destroy_workqueue(hba->timer_work_queue);
  1249. if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
  1250. bnx2fc_fw_destroy(hba);
  1251. clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
  1252. netdev_err:
  1253. mutex_unlock(&bnx2fc_dev_lock);
  1254. rtnl_unlock();
  1255. return rc;
  1256. }
  1257. static void bnx2fc_destroy_work(struct work_struct *work)
  1258. {
  1259. struct fcoe_port *port;
  1260. struct fc_lport *lport;
  1261. port = container_of(work, struct fcoe_port, destroy_work);
  1262. lport = port->lport;
  1263. BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
  1264. bnx2fc_port_shutdown(lport);
  1265. rtnl_lock();
  1266. mutex_lock(&bnx2fc_dev_lock);
  1267. bnx2fc_if_destroy(lport);
  1268. mutex_unlock(&bnx2fc_dev_lock);
  1269. rtnl_unlock();
  1270. }
  1271. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
  1272. {
  1273. bnx2fc_free_fw_resc(hba);
  1274. bnx2fc_free_task_ctx(hba);
  1275. }
  1276. /**
1277. * bnx2fc_bind_adapter_devices - allocate the task context and firmware
1278. * resources used by the adapter
  1279. *
  1280. * @hba: Adapter instance
  1281. */
  1282. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
  1283. {
  1284. if (bnx2fc_setup_task_ctx(hba))
  1285. goto mem_err;
  1286. if (bnx2fc_setup_fw_resc(hba))
  1287. goto mem_err;
  1288. return 0;
  1289. mem_err:
  1290. bnx2fc_unbind_adapter_devices(hba);
  1291. return -ENOMEM;
  1292. }
  1293. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
  1294. {
  1295. struct cnic_dev *cnic;
  1296. if (!hba->cnic) {
  1297. printk(KERN_ERR PFX "cnic is NULL\n");
  1298. return -ENODEV;
  1299. }
  1300. cnic = hba->cnic;
  1301. hba->pcidev = cnic->pcidev;
  1302. if (hba->pcidev)
  1303. pci_dev_get(hba->pcidev);
  1304. return 0;
  1305. }
  1306. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
  1307. {
  1308. if (hba->pcidev)
  1309. pci_dev_put(hba->pcidev);
  1310. hba->pcidev = NULL;
  1311. }
  1312. /**
  1313. * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
  1314. *
1315. * @handle: transport handle pointing to adapter structure
  1316. *
  1317. * This function maps adapter structure to pcidev structure and initiates
  1318. * firmware handshake to enable/initialize on-chip FCoE components.
1319. * This bnx2fc - cnic interface API callback is used after the following
1320. * conditions are met:
1321. * a) the underlying network interface is up (marked by a NETDEV_UP
1322. * event from the netdev), and
1323. * b) the bnx2fc adapter structure is registered.
  1324. */
  1325. static void bnx2fc_ulp_start(void *handle)
  1326. {
  1327. struct bnx2fc_hba *hba = handle;
  1328. struct fc_lport *lport = hba->ctlr.lp;
  1329. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  1330. mutex_lock(&bnx2fc_dev_lock);
  1331. if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
  1332. goto start_disc;
  1333. if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
  1334. bnx2fc_fw_init(hba);
  1335. start_disc:
  1336. mutex_unlock(&bnx2fc_dev_lock);
  1337. BNX2FC_MISC_DBG("bnx2fc started.\n");
  1338. /* Kick off Fabric discovery*/
  1339. if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
  1340. printk(KERN_ERR PFX "ulp_init: start discovery\n");
  1341. lport->tt.frame_send = bnx2fc_xmit;
  1342. bnx2fc_start_disc(hba);
  1343. }
  1344. }
static void bnx2fc_port_shutdown(struct fc_lport *lport)
{
	BNX2FC_MISC_DBG("Entered %s\n", __func__);
	fc_fabric_logoff(lport);
	fc_lport_destroy(lport);
}

static void bnx2fc_stop(struct bnx2fc_hba *hba)
{
	struct fc_lport *lport;
	struct fc_lport *vport;

	BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
			hba->init_done);
	if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
	    test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
		lport = hba->ctlr.lp;
		bnx2fc_port_shutdown(lport);
		BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
				"offloaded sessions\n",
				hba->num_ofld_sess);
		wait_event_interruptible(hba->shutdown_wait,
					 (hba->num_ofld_sess == 0));
		mutex_lock(&lport->lp_mutex);
		list_for_each_entry(vport, &lport->vports, list)
			fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
		mutex_unlock(&lport->lp_mutex);
		fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
		fcoe_ctlr_link_down(&hba->ctlr);
		fcoe_clean_pending_queue(lport);

		mutex_lock(&hba->hba_mutex);
		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
		clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
		mutex_unlock(&hba->hba_mutex);
	}
}
static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
{
#define BNX2FC_INIT_POLL_TIME		(1000 / HZ)
	int rc = -1;
	int i = HZ;

	rc = bnx2fc_bind_adapter_devices(hba);
	if (rc) {
		printk(KERN_ALERT PFX
			"bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
		goto err_out;
	}

	rc = bnx2fc_send_fw_fcoe_init_msg(hba);
	if (rc) {
		printk(KERN_ALERT PFX
			"bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
		goto err_unbind;
	}

	/*
	 * Wait until the adapter init message is complete, and adapter
	 * state is UP.  The loop polls HZ times in steps of
	 * BNX2FC_INIT_POLL_TIME (1000/HZ) ms, i.e. for roughly one second
	 * in total.
	 */
	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
		msleep(BNX2FC_INIT_POLL_TIME);

	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
		printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
				"Ignoring...\n",
				hba->cnic->netdev->name);
		rc = -1;
		goto err_unbind;
	}

	/* Mark HBA to indicate that the FW INIT is done */
	set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
	return 0;

err_unbind:
	bnx2fc_unbind_adapter_devices(hba);
err_out:
	return rc;
}
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
{
	if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
		if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
			init_timer(&hba->destroy_timer);
			hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
								jiffies;
			hba->destroy_timer.function = bnx2fc_destroy_timer;
			hba->destroy_timer.data = (unsigned long)hba;
			add_timer(&hba->destroy_timer);
			wait_event_interruptible(hba->destroy_wait,
						 (hba->flags &
						  BNX2FC_FLAG_DESTROY_CMPL));
			/* This should never happen */
			if (signal_pending(current))
				flush_signals(current);

			del_timer_sync(&hba->destroy_timer);
		}
		bnx2fc_unbind_adapter_devices(hba);
	}
}
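
/*
 * Editorial note (not part of the original driver): on kernels that provide
 * the timer_setup()/from_timer() API, the destroy_timer arming above would
 * be written roughly as:
 *
 *	timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, 0);
 *	hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + jiffies;
 *	add_timer(&hba->destroy_timer);
 *
 * with bnx2fc_destroy_timer() converted to take a struct timer_list *
 * argument and to recover the hba via from_timer(hba, t, destroy_timer).
 * This is a sketch under that assumption, not the code used here.
 */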
/**
 * bnx2fc_ulp_stop - cnic callback to shut down an adapter instance
 *
 * @handle:	transport handle pointing to adapter structure
 *
 * The driver checks whether the adapter is already in shutdown mode; if
 * not, it starts the shutdown process.
 */
static void bnx2fc_ulp_stop(void *handle)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;

	printk(KERN_ERR "ULP_STOP\n");

	mutex_lock(&bnx2fc_dev_lock);
	bnx2fc_stop(hba);
	bnx2fc_fw_destroy(hba);
	mutex_unlock(&bnx2fc_dev_lock);
}
static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
{
	struct fc_lport *lport;
	int wait_cnt = 0;

	BNX2FC_MISC_DBG("Entered %s\n", __func__);
	/* Kick off FIP/FLOGI */
	if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
		printk(KERN_ERR PFX "Init not done yet\n");
		return;
	}

	lport = hba->ctlr.lp;
	BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");

	if (!bnx2fc_link_ok(lport)) {
		BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
		fcoe_ctlr_link_up(&hba->ctlr);
		fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
		set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
	}

	/* wait for the FCF to be selected before issuing FLOGI */
	while (!hba->ctlr.sel_fcf) {
		msleep(250);
		/* give up after 3 secs */
		if (++wait_cnt > 12)
			break;
	}
	fc_lport_init(lport);
	fc_fabric_login(lport);
}
/**
 * bnx2fc_ulp_init - Initialize an adapter instance
 *
 * @dev:	cnic device handle
 *
 * Called from cnic_register_driver() context to initialize all
 * enumerated cnic devices. This routine allocates adapter structure
 * and other device specific resources.
 */
static void bnx2fc_ulp_init(struct cnic_dev *dev)
{
	struct bnx2fc_hba *hba;
	int rc = 0;

	BNX2FC_MISC_DBG("Entered %s\n", __func__);
	/* bnx2fc works only when bnx2x is loaded */
	if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
				    " flags: %lx\n",
			dev->netdev->name, dev->flags);
		return;
	}

	/* Configure FCoE interface */
	hba = bnx2fc_interface_create(dev);
	if (!hba) {
		printk(KERN_ERR PFX "hba initialization failed\n");
		return;
	}

	/* Add HBA to the adapter list */
	mutex_lock(&bnx2fc_dev_lock);
	list_add_tail(&hba->link, &adapter_list);
	adapter_count++;
	mutex_unlock(&bnx2fc_dev_lock);

	clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
	rc = dev->register_device(dev, CNIC_ULP_FCOE,
						(void *) hba);
	if (rc)
		printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
	else
		set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
static int bnx2fc_disable(struct net_device *netdev)
{
	struct bnx2fc_hba *hba;
	struct net_device *phys_dev;
	struct ethtool_drvinfo drvinfo;
	int rc = 0;

	rtnl_lock();

	mutex_lock(&bnx2fc_dev_lock);

	/* obtain physical netdev */
	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		phys_dev = vlan_dev_real_dev(netdev);
	else {
		printk(KERN_ERR PFX "Not a vlan device\n");
		rc = -ENODEV;
		goto nodev;
	}

	/* verify if the physical device is a netxtreme2 device */
	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
		if (strcmp(drvinfo.driver, "bnx2x")) {
			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
			rc = -ENODEV;
			goto nodev;
		}
	} else {
		printk(KERN_ERR PFX "unable to obtain drv_info\n");
		rc = -ENODEV;
		goto nodev;
	}

	printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");

	/* obtain hba and initialize rest of the structure */
	hba = bnx2fc_hba_lookup(phys_dev);
	if (!hba || !hba->ctlr.lp) {
		rc = -ENODEV;
		printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
	} else {
		fcoe_ctlr_link_down(&hba->ctlr);
		fcoe_clean_pending_queue(hba->ctlr.lp);
	}

nodev:
	mutex_unlock(&bnx2fc_dev_lock);
	rtnl_unlock();
	return rc;
}
static int bnx2fc_enable(struct net_device *netdev)
{
	struct bnx2fc_hba *hba;
	struct net_device *phys_dev;
	struct ethtool_drvinfo drvinfo;
	int rc = 0;

	rtnl_lock();

	BNX2FC_MISC_DBG("Entered %s\n", __func__);
	mutex_lock(&bnx2fc_dev_lock);

	/* obtain physical netdev */
	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		phys_dev = vlan_dev_real_dev(netdev);
	else {
		printk(KERN_ERR PFX "Not a vlan device\n");
		rc = -ENODEV;
		goto nodev;
	}

	/* verify if the physical device is a netxtreme2 device */
	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
		if (strcmp(drvinfo.driver, "bnx2x")) {
			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
			rc = -ENODEV;
			goto nodev;
		}
	} else {
		printk(KERN_ERR PFX "unable to obtain drv_info\n");
		rc = -ENODEV;
		goto nodev;
	}

	/* obtain hba and initialize rest of the structure */
	hba = bnx2fc_hba_lookup(phys_dev);
	if (!hba || !hba->ctlr.lp) {
		rc = -ENODEV;
		printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
	} else if (!bnx2fc_link_ok(hba->ctlr.lp))
		fcoe_ctlr_link_up(&hba->ctlr);

nodev:
	mutex_unlock(&bnx2fc_dev_lock);
	rtnl_unlock();
	return rc;
}
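
/*
 * Editorial sketch (hypothetical helper, not part of the driver):
 * bnx2fc_disable(), bnx2fc_enable() and bnx2fc_create() all repeat the same
 * "is this a VLAN device on top of a bnx2x (netxtreme2) port?" validation.
 * The duplicated checks could be factored out roughly as below; the helper
 * name is illustrative only and nothing in the driver calls it.
 */
#if 0	/* illustrative only */
static struct net_device *bnx2fc_validate_vlan_netdev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;
	struct net_device *phys_dev;

	/* obtain physical netdev */
	if (!(netdev->priv_flags & IFF_802_1Q_VLAN)) {
		printk(KERN_ERR PFX "Not a vlan device\n");
		return NULL;
	}
	phys_dev = vlan_dev_real_dev(netdev);

	/* verify if the physical device is a netxtreme2 device */
	if (!phys_dev->ethtool_ops || !phys_dev->ethtool_ops->get_drvinfo) {
		printk(KERN_ERR PFX "unable to obtain drv_info\n");
		return NULL;
	}
	memset(&drvinfo, 0, sizeof(drvinfo));
	phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
	if (strcmp(drvinfo.driver, "bnx2x")) {
		printk(KERN_ERR PFX "Not a netxtreme2 device\n");
		return NULL;
	}

	return phys_dev;
}
#endif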
/**
 * bnx2fc_create - Create bnx2fc FCoE interface
 *
 * @netdev:	The net_device (VLAN interface) to create the FCoE
 *		interface on
 * @fip_mode:	The FIP mode requested for this interface
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
	struct bnx2fc_hba *hba;
	struct net_device *phys_dev;
	struct fc_lport *lport;
	struct ethtool_drvinfo drvinfo;
	int rc = 0;
	int vlan_id;

	BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
	if (fip_mode != FIP_MODE_FABRIC) {
		printk(KERN_ERR "fip mode not FABRIC\n");
		return -EIO;
	}

	rtnl_lock();

	mutex_lock(&bnx2fc_dev_lock);

	if (!try_module_get(THIS_MODULE)) {
		rc = -EINVAL;
		goto mod_err;
	}

	/* obtain physical netdev */
	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
		phys_dev = vlan_dev_real_dev(netdev);
		vlan_id = vlan_dev_vlan_id(netdev);
	} else {
		printk(KERN_ERR PFX "Not a vlan device\n");
		rc = -EINVAL;
		goto netdev_err;
	}

	/* verify if the physical device is a netxtreme2 device */
	if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
		if (strcmp(drvinfo.driver, "bnx2x")) {
			printk(KERN_ERR PFX "Not a netxtreme2 device\n");
			rc = -EINVAL;
			goto netdev_err;
		}
	} else {
		printk(KERN_ERR PFX "unable to obtain drv_info\n");
		rc = -EINVAL;
		goto netdev_err;
	}

	/* obtain hba and initialize rest of the structure */
	hba = bnx2fc_hba_lookup(phys_dev);
	if (!hba) {
		rc = -ENODEV;
		printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
		goto netdev_err;
	}

	if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
		rc = bnx2fc_fw_init(hba);
		if (rc)
			goto netdev_err;
	}

	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
		rc = -EEXIST;
		goto netdev_err;
	}

	/* update netdev with vlan netdev */
	hba->netdev = netdev;
	hba->vlan_id = vlan_id;
	hba->vlan_enabled = 1;

	rc = bnx2fc_interface_setup(hba, fip_mode);
	if (rc) {
		printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
		goto ifput_err;
	}

	hba->timer_work_queue =
			create_singlethread_workqueue("bnx2fc_timer_wq");
	if (!hba->timer_work_queue) {
		printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
		rc = -EINVAL;
		goto ifput_err;
	}

	lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
	if (!lport) {
		printk(KERN_ERR PFX "Failed to create interface (%s)\n",
			netdev->name);
		bnx2fc_netdev_cleanup(hba);
		rc = -EINVAL;
		goto if_create_err;
	}

	lport->boot_time = jiffies;

	/* Make this master N_port */
	hba->ctlr.lp = lport;

	set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
	printk(KERN_ERR PFX "create: START DISC\n");
	bnx2fc_start_disc(hba);
	/*
	 * Release from kref_init in bnx2fc_interface_setup, on success
	 * lport should be holding a reference taken in bnx2fc_if_create
	 */
	bnx2fc_interface_put(hba);
	/* put netdev that was held while calling dev_get_by_name */
	mutex_unlock(&bnx2fc_dev_lock);
	rtnl_unlock();
	return 0;

if_create_err:
	destroy_workqueue(hba->timer_work_queue);
ifput_err:
	bnx2fc_interface_put(hba);
netdev_err:
	module_put(THIS_MODULE);
mod_err:
	mutex_unlock(&bnx2fc_dev_lock);
	rtnl_unlock();
	return rc;
}
/**
 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
 *
 * @cnic:	Pointer to cnic device instance
 *
 **/
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
{
	struct list_head *list;
	struct list_head *temp;
	struct bnx2fc_hba *hba;

	/* Called with bnx2fc_dev_lock held */
	list_for_each_safe(list, temp, &adapter_list) {
		hba = (struct bnx2fc_hba *)list;
		if (hba->cnic == cnic)
			return hba;
	}
	return NULL;
}
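
/*
 * Editorial sketch (illustrative only, not part of the driver): the cast
 * above, and the identical one in bnx2fc_hba_lookup() below, rely on the
 * 'link' list_head being the first member of struct bnx2fc_hba.  A
 * container_of()-based iterator does not depend on member ordering and
 * would look roughly like this:
 */
#if 0	/* illustrative only */
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
{
	struct bnx2fc_hba *hba;

	/* Called with bnx2fc_dev_lock held */
	list_for_each_entry(hba, &adapter_list, link) {
		if (hba->cnic == cnic)
			return hba;
	}
	return NULL;
}
#endif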
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
{
	struct list_head *list;
	struct list_head *temp;
	struct bnx2fc_hba *hba;

	/* Called with bnx2fc_dev_lock held */
	list_for_each_safe(list, temp, &adapter_list) {
		hba = (struct bnx2fc_hba *)list;
		if (hba->phys_dev == phys_dev)
			return hba;
	}
	printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
	return NULL;
}
/**
 * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
 *
 * @dev:	cnic device handle
 */
static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
	struct bnx2fc_hba *hba;

	BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");

	if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
			dev->netdev->name, dev->flags);
		return;
	}

	mutex_lock(&bnx2fc_dev_lock);
	hba = bnx2fc_find_hba_for_cnic(dev);
	if (!hba) {
		printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0x%p\n",
		       dev);
		mutex_unlock(&bnx2fc_dev_lock);
		return;
	}
	list_del_init(&hba->link);

	adapter_count--;

	if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
		/* destroy not called yet, move to quiesced list */
		bnx2fc_netdev_cleanup(hba);
		bnx2fc_if_destroy(hba->ctlr.lp);
	}
	mutex_unlock(&bnx2fc_dev_lock);

	bnx2fc_ulp_stop(hba);
	/* unregister cnic device */
	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
	bnx2fc_interface_destroy(hba);
}
/**
 * bnx2fc_fcoe_reset - Resets the FCoE lport
 *
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
static bool bnx2fc_match(struct net_device *netdev)
{
	mutex_lock(&bnx2fc_dev_lock);
	if (netdev->priv_flags & IFF_802_1Q_VLAN) {
		struct net_device *phys_dev = vlan_dev_real_dev(netdev);

		if (bnx2fc_hba_lookup(phys_dev)) {
			mutex_unlock(&bnx2fc_dev_lock);
			return true;
		}
	}
	mutex_unlock(&bnx2fc_dev_lock);
	return false;
}
static struct fcoe_transport bnx2fc_transport = {
	.name = {"bnx2fc"},
	.attached = false,
	.list = LIST_HEAD_INIT(bnx2fc_transport.list),
	.match = bnx2fc_match,
	.create = bnx2fc_create,
	.destroy = bnx2fc_destroy,
	.enable = bnx2fc_enable,
	.disable = bnx2fc_disable,
};
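
/*
 * Editorial note (not part of the original source): bnx2fc_mod_init() below
 * attaches this template to libfcoe via fcoe_transport_attach().  From then
 * on, create/destroy/enable/disable requests that libfcoe receives for a
 * netdev accepted by .match (a VLAN interface on a bnx2x port) are routed to
 * the handlers above.  On kernels of this vintage such requests typically
 * arrive through libfcoe's module parameters, e.g. something along the lines
 * of:
 *
 *	echo eth0.100 > /sys/module/libfcoe/parameters/create
 *
 * The interface name and exact sysfs path are illustrative and may vary.
 */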
/**
 * bnx2fc_percpu_thread_create - Create a receive thread for an
 *				 online CPU
 *
 * @cpu: cpu index for the online cpu
 */
static void bnx2fc_percpu_thread_create(unsigned int cpu)
{
	struct bnx2fc_percpu_s *p;
	struct task_struct *thread;

	p = &per_cpu(bnx2fc_percpu, cpu);

	thread = kthread_create(bnx2fc_percpu_io_thread,
				(void *)p,
				"bnx2fc_thread/%d", cpu);
	/*
	 * Bind the thread to the cpu.  The error check must be on the
	 * task_struct just returned by kthread_create(), not on
	 * p->iothread, which has not been assigned yet.
	 */
	if (likely(!IS_ERR(thread))) {
		kthread_bind(thread, cpu);
		p->iothread = thread;
		wake_up_process(thread);
	}
}
static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
{
	struct bnx2fc_percpu_s *p;
	struct task_struct *thread;
	struct bnx2fc_work *work, *tmp;

	BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);

	/* Prevent any new work from being queued for this CPU */
	p = &per_cpu(bnx2fc_percpu, cpu);
	spin_lock_bh(&p->fp_work_lock);
	thread = p->iothread;
	p->iothread = NULL;

	/*
	 * Free all work still pending for this CPU.  The loop must walk
	 * p->work_list; iterating a freshly initialized local list head
	 * would free nothing.
	 */
	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
		list_del_init(&work->list);
		bnx2fc_process_cq_compl(work->tgt, work->wqe);
		kfree(work);
	}

	spin_unlock_bh(&p->fp_work_lock);

	if (thread)
		kthread_stop(thread);
}
/**
 * bnx2fc_cpu_callback - Handler for CPU hotplug events
 *
 * @nfb:    The callback data block
 * @action: The event triggering the callback
 * @hcpu:   The index of the CPU that the event is for
 *
 * This creates or destroys per-CPU data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	unsigned cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(PFX "CPU %x online: Create Rx thread\n", cpu);
		bnx2fc_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
		bnx2fc_percpu_thread_destroy(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
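
/*
 * Editorial sketch (illustrative only): the bnx2fc_cpu_notifier registered
 * in bnx2fc_mod_init() is declared elsewhere in this file; wiring the
 * callback above into CPU hotplug notifications would typically look like:
 */
#if 0	/* illustrative only */
static struct notifier_block bnx2fc_cpu_notifier = {
	.notifier_call = bnx2fc_cpu_callback,
};
#endif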
/**
 * bnx2fc_mod_init - module init entry point
 *
 * Initialize driver wide global data structures, and register
 * with cnic module
 **/
static int __init bnx2fc_mod_init(void)
{
	struct fcoe_percpu_s *bg;
	struct task_struct *l2_thread;
	int rc = 0;
	unsigned int cpu = 0;
	struct bnx2fc_percpu_s *p;

	printk(KERN_INFO PFX "%s", version);

	/* register as a fcoe transport */
	rc = fcoe_transport_attach(&bnx2fc_transport);
	if (rc) {
		printk(KERN_ERR "failed to register an fcoe transport, check "
			"if libfcoe is loaded\n");
		goto out;
	}

	INIT_LIST_HEAD(&adapter_list);
	mutex_init(&bnx2fc_dev_lock);
	adapter_count = 0;

	/* Attach FC transport template */
	rc = bnx2fc_attach_transport();
	if (rc)
		goto detach_ft;

	bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
	if (!bnx2fc_wq) {
		rc = -ENOMEM;
		goto release_bt;
	}

	bg = &bnx2fc_global;
	skb_queue_head_init(&bg->fcoe_rx_list);
	l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
				   (void *)bg,
				   "bnx2fc_l2_thread");
	if (IS_ERR(l2_thread)) {
		rc = PTR_ERR(l2_thread);
		goto free_wq;
	}
	wake_up_process(l2_thread);
	spin_lock_bh(&bg->fcoe_rx_list.lock);
	bg->thread = l2_thread;
	spin_unlock_bh(&bg->fcoe_rx_list.lock);

	for_each_possible_cpu(cpu) {
		p = &per_cpu(bnx2fc_percpu, cpu);
		INIT_LIST_HEAD(&p->work_list);
		spin_lock_init(&p->fp_work_lock);
	}

	for_each_online_cpu(cpu) {
		bnx2fc_percpu_thread_create(cpu);
	}

	/* Initialize per CPU interrupt thread */
	register_hotcpu_notifier(&bnx2fc_cpu_notifier);

	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);

	return 0;

free_wq:
	destroy_workqueue(bnx2fc_wq);
release_bt:
	bnx2fc_release_transport();
detach_ft:
	fcoe_transport_detach(&bnx2fc_transport);
out:
	return rc;
}
static void __exit bnx2fc_mod_exit(void)
{
	LIST_HEAD(to_be_deleted);
	struct bnx2fc_hba *hba, *next;
	struct fcoe_percpu_s *bg;
	struct task_struct *l2_thread;
	struct sk_buff *skb;
	unsigned int cpu = 0;

	/*
	 * NOTE: Since the cnic register_driver routine takes rtnl_lock,
	 * rtnl_lock ranks above bnx2fc_dev_lock in the lock ordering.
	 * unregister_device() therefore cannot be called with
	 * bnx2fc_dev_lock held.
	 */
	mutex_lock(&bnx2fc_dev_lock);
	list_splice(&adapter_list, &to_be_deleted);
	INIT_LIST_HEAD(&adapter_list);
	adapter_count = 0;
	mutex_unlock(&bnx2fc_dev_lock);

	/* Unregister with cnic */
	list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
		list_del_init(&hba->link);
		printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
			hba, atomic_read(&hba->kref.refcount));
		bnx2fc_ulp_stop(hba);
		/* unregister cnic device */
		if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
				       &hba->reg_with_cnic))
			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
		bnx2fc_interface_destroy(hba);
	}
	cnic_unregister_driver(CNIC_ULP_FCOE);

	/* Destroy global thread */
	bg = &bnx2fc_global;
	spin_lock_bh(&bg->fcoe_rx_list.lock);
	l2_thread = bg->thread;
	bg->thread = NULL;
	while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
		kfree_skb(skb);

	spin_unlock_bh(&bg->fcoe_rx_list.lock);

	if (l2_thread)
		kthread_stop(l2_thread);

	unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);

	/* Destroy per cpu threads */
	for_each_online_cpu(cpu) {
		bnx2fc_percpu_thread_destroy(cpu);
	}

	destroy_workqueue(bnx2fc_wq);
	/*
	 * detach from scsi transport
	 * must happen after all destroys are done
	 */
	bnx2fc_release_transport();

	/* detach from fcoe transport */
	fcoe_transport_detach(&bnx2fc_transport);
}

module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
static struct fc_function_template bnx2fc_transport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct bnx2fc_rport)),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = bnx2fc_get_host_stats,

	.issue_fc_host_lip = bnx2fc_fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,

	.vport_create = bnx2fc_vport_create,
	.vport_delete = bnx2fc_vport_destroy,
	.vport_disable = bnx2fc_vport_disable,
};
static struct fc_function_template bnx2fc_vport_xport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct bnx2fc_rport)),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,

	.issue_fc_host_lip = bnx2fc_fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,
};
/**
 * scsi_host_template structure used while registering with SCSI-ml
 */
static struct scsi_host_template bnx2fc_shost_template = {
	.module = THIS_MODULE,
	.name = "Broadcom Offload FCoE Initiator",
	.queuecommand = bnx2fc_queuecommand,
	.eh_abort_handler = bnx2fc_eh_abort,		  /* abts */
	.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
	.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.can_queue = BNX2FC_CAN_QUEUE,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
	.max_sectors = 512,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
	.frame_send = bnx2fc_xmit,
	.elsct_send = bnx2fc_elsct_send,
	.fcp_abort_io = bnx2fc_abort_io,
	.fcp_cleanup = bnx2fc_cleanup,
	.rport_event_callback = bnx2fc_rport_event_handler,
};
/**
 * bnx2fc_cnic_cb - global template of the bnx2fc-cnic driver interface,
 *		    carrying the callback function pointers invoked by cnic
 */
static struct cnic_ulp_ops bnx2fc_cnic_cb = {
	.owner = THIS_MODULE,
	.cnic_init = bnx2fc_ulp_init,
	.cnic_exit = bnx2fc_ulp_exit,
	.cnic_start = bnx2fc_ulp_start,
	.cnic_stop = bnx2fc_ulp_stop,
	.indicate_kcqes = bnx2fc_indicate_kcqe,
	.indicate_netevent = bnx2fc_indicate_netevent,
};