bnx2fc_fcoe.c

  1. /* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
  2. * This file contains the code that interacts with libfc, libfcoe,
  3. * cnic modules to create FCoE instances, send/receive non-offloaded
  4. * FIP/FCoE packets, listen to link events etc.
  5. *
  6. * Copyright (c) 2008 - 2010 Broadcom Corporation
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation.
  11. *
  12. * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  13. */
  14. #include "bnx2fc.h"
  15. static struct list_head adapter_list;
  16. static u32 adapter_count;
  17. static DEFINE_MUTEX(bnx2fc_dev_lock);
  18. DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
  19. #define DRV_MODULE_NAME "bnx2fc"
  20. #define DRV_MODULE_VERSION BNX2FC_VERSION
  21. #define DRV_MODULE_RELDATE "Jun 10, 2011"
  22. static char version[] __devinitdata =
  23. "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
  24. " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  25. MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
  26. MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
  27. MODULE_LICENSE("GPL");
  28. MODULE_VERSION(DRV_MODULE_VERSION);
  29. #define BNX2FC_MAX_QUEUE_DEPTH 256
  30. #define BNX2FC_MIN_QUEUE_DEPTH 32
  31. #define FCOE_WORD_TO_BYTE 4
  32. static struct scsi_transport_template *bnx2fc_transport_template;
  33. static struct scsi_transport_template *bnx2fc_vport_xport_template;
  34. struct workqueue_struct *bnx2fc_wq;
  35. /* bnx2fc structure needs only one instance of the fcoe_percpu_s structure.
  36. * Here the I/O threads are per CPU, but there is only one L2 thread
  37. */
  38. struct fcoe_percpu_s bnx2fc_global;
  39. DEFINE_SPINLOCK(bnx2fc_global_lock);
  40. static struct cnic_ulp_ops bnx2fc_cnic_cb;
  41. static struct libfc_function_template bnx2fc_libfc_fcn_templ;
  42. static struct scsi_host_template bnx2fc_shost_template;
  43. static struct fc_function_template bnx2fc_transport_function;
  44. static struct fc_function_template bnx2fc_vport_xport_function;
  45. static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
  46. static int bnx2fc_destroy(struct net_device *net_device);
  47. static int bnx2fc_enable(struct net_device *netdev);
  48. static int bnx2fc_disable(struct net_device *netdev);
  49. static void bnx2fc_recv_frame(struct sk_buff *skb);
  50. static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
  51. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
  52. static int bnx2fc_net_config(struct fc_lport *lp);
  53. static int bnx2fc_lport_config(struct fc_lport *lport);
  54. static int bnx2fc_em_config(struct fc_lport *lport);
  55. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
  56. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
  57. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
  58. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
  59. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
  60. struct device *parent, int npiv);
  61. static void bnx2fc_destroy_work(struct work_struct *work);
  62. static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
  63. static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
  64. static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
  65. static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
  66. static void bnx2fc_port_shutdown(struct fc_lport *lport);
  67. static void bnx2fc_stop(struct bnx2fc_hba *hba);
  68. static int __init bnx2fc_mod_init(void);
  69. static void __exit bnx2fc_mod_exit(void);
  70. unsigned int bnx2fc_debug_level;
  71. module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
  72. static int bnx2fc_cpu_callback(struct notifier_block *nfb,
  73. unsigned long action, void *hcpu);
  74. /* notification function for CPU hotplug events */
  75. static struct notifier_block bnx2fc_cpu_notifier = {
  76. .notifier_call = bnx2fc_cpu_callback,
  77. };
  78. static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
  79. {
  80. struct fcoe_percpu_s *bg;
  81. struct fcoe_rcv_info *fr;
  82. struct sk_buff_head *list;
  83. struct sk_buff *skb, *next;
  84. struct sk_buff *head;
  85. bg = &bnx2fc_global;
  86. spin_lock_bh(&bg->fcoe_rx_list.lock);
  87. list = &bg->fcoe_rx_list;
  88. head = list->next;
  89. for (skb = head; skb != (struct sk_buff *)list;
  90. skb = next) {
  91. next = skb->next;
  92. fr = fcoe_dev_from_skb(skb);
  93. if (fr->fr_dev == lp) {
  94. __skb_unlink(skb, list);
  95. kfree_skb(skb);
  96. }
  97. }
  98. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  99. }
  100. int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  101. {
  102. int rc;
  103. spin_lock(&bnx2fc_global_lock);
  104. rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
  105. spin_unlock(&bnx2fc_global_lock);
  106. return rc;
  107. }
  108. static void bnx2fc_abort_io(struct fc_lport *lport)
  109. {
  110. /*
  111. * This function is a no-op for bnx2fc, but we do
  112. * not want to leave it as NULL either, as libfc
  113. * can call the default function which is
  114. * fc_fcp_abort_io.
  115. */
  116. }
  117. static void bnx2fc_cleanup(struct fc_lport *lport)
  118. {
  119. struct fcoe_port *port = lport_priv(lport);
  120. struct bnx2fc_hba *hba = port->priv;
  121. struct bnx2fc_rport *tgt;
  122. int i;
  123. BNX2FC_MISC_DBG("Entered %s\n", __func__);
  124. mutex_lock(&hba->hba_mutex);
  125. spin_lock_bh(&hba->hba_lock);
  126. for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
  127. tgt = hba->tgt_ofld_list[i];
  128. if (tgt) {
  129. /* Cleanup IOs belonging to requested vport */
  130. if (tgt->port == port) {
  131. spin_unlock_bh(&hba->hba_lock);
  132. BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
  133. bnx2fc_flush_active_ios(tgt);
  134. spin_lock_bh(&hba->hba_lock);
  135. }
  136. }
  137. }
  138. spin_unlock_bh(&hba->hba_lock);
  139. mutex_unlock(&hba->hba_mutex);
  140. }
  141. static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
  142. struct fc_frame *fp)
  143. {
  144. struct fc_rport_priv *rdata = tgt->rdata;
  145. struct fc_frame_header *fh;
  146. int rc = 0;
  147. fh = fc_frame_header_get(fp);
  148. BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
  149. "r_ctl = 0x%x\n", rdata->ids.port_id,
  150. ntohs(fh->fh_ox_id), fh->fh_r_ctl);
  151. if ((fh->fh_type == FC_TYPE_ELS) &&
  152. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  153. switch (fc_frame_payload_op(fp)) {
  154. case ELS_ADISC:
  155. rc = bnx2fc_send_adisc(tgt, fp);
  156. break;
  157. case ELS_LOGO:
  158. rc = bnx2fc_send_logo(tgt, fp);
  159. break;
  160. case ELS_RLS:
  161. rc = bnx2fc_send_rls(tgt, fp);
  162. break;
  163. default:
  164. break;
  165. }
  166. } else if ((fh->fh_type == FC_TYPE_BLS) &&
  167. (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
  168. BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
  169. else {
  170. BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
  171. "rctl 0x%x thru non-offload path\n",
  172. fh->fh_type, fh->fh_r_ctl);
  173. return -ENODEV;
  174. }
  175. if (rc)
  176. return -ENOMEM;
  177. else
  178. return 0;
  179. }
  180. /**
  181. * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
  182. *
  183. * @lport: the associated local port
  184. * @fp: the fc_frame to be transmitted
  185. */
  186. static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
  187. {
  188. struct ethhdr *eh;
  189. struct fcoe_crc_eof *cp;
  190. struct sk_buff *skb;
  191. struct fc_frame_header *fh;
  192. struct bnx2fc_hba *hba;
  193. struct fcoe_port *port;
  194. struct fcoe_hdr *hp;
  195. struct bnx2fc_rport *tgt;
  196. struct fcoe_dev_stats *stats;
  197. u8 sof, eof;
  198. u32 crc;
  199. unsigned int hlen, tlen, elen;
  200. int wlen, rc = 0;
  201. port = (struct fcoe_port *)lport_priv(lport);
  202. hba = port->priv;
  203. fh = fc_frame_header_get(fp);
  204. skb = fp_skb(fp);
  205. if (!lport->link_up) {
  206. BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
  207. kfree_skb(skb);
  208. return 0;
  209. }
  210. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  211. if (!hba->ctlr.sel_fcf) {
  212. BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
  213. kfree_skb(skb);
  214. return -EINVAL;
  215. }
  216. if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
  217. return 0;
  218. }
  219. sof = fr_sof(fp);
  220. eof = fr_eof(fp);
  221. /*
  222. * Snoop the frame header to check if the frame is for
  223. * an offloaded session
  224. */
  225. /*
  226. * tgt_ofld_list access is synchronized using
  227. * both the hba mutex and the hba lock. At least one of the hba
  228. * mutex or the hba lock must be held for read access.
  229. */
  230. spin_lock_bh(&hba->hba_lock);
  231. tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
  232. if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
  233. /* This frame is for offloaded session */
  234. BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
  235. "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
  236. spin_unlock_bh(&hba->hba_lock);
  237. rc = bnx2fc_xmit_l2_frame(tgt, fp);
  238. if (rc != -ENODEV) {
  239. kfree_skb(skb);
  240. return rc;
  241. }
  242. } else {
  243. spin_unlock_bh(&hba->hba_lock);
  244. }
  245. elen = sizeof(struct ethhdr);
  246. hlen = sizeof(struct fcoe_hdr);
  247. tlen = sizeof(struct fcoe_crc_eof);
  248. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  249. skb->ip_summed = CHECKSUM_NONE;
  250. crc = fcoe_fc_crc(fp);
  251. /* copy port crc and eof to the skb buff */
  252. if (skb_is_nonlinear(skb)) {
  253. skb_frag_t *frag;
  254. if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
  255. kfree_skb(skb);
  256. return -ENOMEM;
  257. }
  258. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  259. cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
  260. + frag->page_offset;
  261. } else {
  262. cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
  263. }
  264. memset(cp, 0, sizeof(*cp));
  265. cp->fcoe_eof = eof;
  266. cp->fcoe_crc32 = cpu_to_le32(~crc);
  267. if (skb_is_nonlinear(skb)) {
  268. kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
  269. cp = NULL;
  270. }
  271. /* adjust skb network/transport offsets to match mac/fcoe/port */
  272. skb_push(skb, elen + hlen);
  273. skb_reset_mac_header(skb);
  274. skb_reset_network_header(skb);
  275. skb->mac_len = elen;
  276. skb->protocol = htons(ETH_P_FCOE);
  277. skb->dev = hba->netdev;
  278. /* fill up mac and fcoe headers */
  279. eh = eth_hdr(skb);
  280. eh->h_proto = htons(ETH_P_FCOE);
  281. if (hba->ctlr.map_dest)
  282. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  283. else
  284. /* insert GW address */
  285. memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
  286. if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
  287. memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
  288. else
  289. memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
  290. hp = (struct fcoe_hdr *)(eh + 1);
  291. memset(hp, 0, sizeof(*hp));
  292. if (FC_FCOE_VER)
  293. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  294. hp->fcoe_sof = sof;
  295. /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
  296. if (lport->seq_offload && fr_max_payload(fp)) {
  297. skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
  298. skb_shinfo(skb)->gso_size = fr_max_payload(fp);
  299. } else {
  300. skb_shinfo(skb)->gso_type = 0;
  301. skb_shinfo(skb)->gso_size = 0;
  302. }
  303. /* update tx stats */
  304. stats = per_cpu_ptr(lport->dev_stats, get_cpu());
  305. stats->TxFrames++;
  306. stats->TxWords += wlen;
  307. put_cpu();
  308. /* send down to lld */
  309. fr_dev(fp) = lport;
  310. if (port->fcoe_pending_queue.qlen)
  311. fcoe_check_wait_queue(lport, skb);
  312. else if (fcoe_start_io(skb))
  313. fcoe_check_wait_queue(lport, skb);
  314. return 0;
  315. }
  316. /**
  317. * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
  318. *
  319. * @skb: the receive socket buffer
  320. * @dev: associated net device
  321. * @ptype: context
  322. * @olddev: last device
  323. *
  324. * This function receives the packet, builds an FC frame, and passes it up
  325. */
  326. static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
  327. struct packet_type *ptype, struct net_device *olddev)
  328. {
  329. struct fc_lport *lport;
  330. struct bnx2fc_hba *hba;
  331. struct fc_frame_header *fh;
  332. struct fcoe_rcv_info *fr;
  333. struct fcoe_percpu_s *bg;
  334. unsigned short oxid;
  335. hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
  336. lport = hba->ctlr.lp;
  337. if (unlikely(lport == NULL)) {
  338. printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
  339. goto err;
  340. }
  341. if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
  342. printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
  343. goto err;
  344. }
  345. /*
  346. * Check for minimum frame length, and make sure required FCoE
  347. * and FC headers are pulled into the linear data area.
  348. */
  349. if (unlikely((skb->len < FCOE_MIN_FRAME) ||
  350. !pskb_may_pull(skb, FCOE_HEADER_LEN)))
  351. goto err;
  352. skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
  353. fh = (struct fc_frame_header *) skb_transport_header(skb);
  354. oxid = ntohs(fh->fh_ox_id);
  355. fr = fcoe_dev_from_skb(skb);
  356. fr->fr_dev = lport;
  357. fr->ptype = ptype;
  358. bg = &bnx2fc_global;
  359. spin_lock_bh(&bg->fcoe_rx_list.lock);
  360. __skb_queue_tail(&bg->fcoe_rx_list, skb);
  361. if (bg->fcoe_rx_list.qlen == 1)
  362. wake_up_process(bg->thread);
  363. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  364. return 0;
  365. err:
  366. kfree_skb(skb);
  367. return -1;
  368. }
  369. static int bnx2fc_l2_rcv_thread(void *arg)
  370. {
  371. struct fcoe_percpu_s *bg = arg;
  372. struct sk_buff *skb;
  373. set_user_nice(current, -20);
  374. set_current_state(TASK_INTERRUPTIBLE);
  375. while (!kthread_should_stop()) {
  376. schedule();
  377. spin_lock_bh(&bg->fcoe_rx_list.lock);
  378. while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
  379. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  380. bnx2fc_recv_frame(skb);
  381. spin_lock_bh(&bg->fcoe_rx_list.lock);
  382. }
  383. __set_current_state(TASK_INTERRUPTIBLE);
  384. spin_unlock_bh(&bg->fcoe_rx_list.lock);
  385. }
  386. __set_current_state(TASK_RUNNING);
  387. return 0;
  388. }
  389. static void bnx2fc_recv_frame(struct sk_buff *skb)
  390. {
  391. u32 fr_len;
  392. struct fc_lport *lport;
  393. struct fcoe_rcv_info *fr;
  394. struct fcoe_dev_stats *stats;
  395. struct fc_frame_header *fh;
  396. struct fcoe_crc_eof crc_eof;
  397. struct fc_frame *fp;
  398. struct fc_lport *vn_port;
  399. struct fcoe_port *port;
  400. u8 *mac = NULL;
  401. u8 *dest_mac = NULL;
  402. struct fcoe_hdr *hp;
  403. fr = fcoe_dev_from_skb(skb);
  404. lport = fr->fr_dev;
  405. if (unlikely(lport == NULL)) {
  406. printk(KERN_ALERT PFX "Invalid lport struct\n");
  407. kfree_skb(skb);
  408. return;
  409. }
  410. if (skb_is_nonlinear(skb))
  411. skb_linearize(skb);
  412. mac = eth_hdr(skb)->h_source;
  413. dest_mac = eth_hdr(skb)->h_dest;
  414. /* Pull the header */
  415. hp = (struct fcoe_hdr *) skb_network_header(skb);
  416. fh = (struct fc_frame_header *) skb_transport_header(skb);
  417. skb_pull(skb, sizeof(struct fcoe_hdr));
  418. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  419. stats = per_cpu_ptr(lport->dev_stats, get_cpu());
  420. stats->RxFrames++;
  421. stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
  422. fp = (struct fc_frame *)skb;
  423. fc_frame_init(fp);
  424. fr_dev(fp) = lport;
  425. fr_sof(fp) = hp->fcoe_sof;
  426. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  427. put_cpu();
  428. kfree_skb(skb);
  429. return;
  430. }
  431. fr_eof(fp) = crc_eof.fcoe_eof;
  432. fr_crc(fp) = crc_eof.fcoe_crc32;
  433. if (pskb_trim(skb, fr_len)) {
  434. put_cpu();
  435. kfree_skb(skb);
  436. return;
  437. }
  438. fh = fc_frame_header_get(fp);
  439. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  440. if (vn_port) {
  441. port = lport_priv(vn_port);
  442. if (compare_ether_addr(port->data_src_addr, dest_mac)
  443. != 0) {
  444. BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
  445. put_cpu();
  446. kfree_skb(skb);
  447. return;
  448. }
  449. }
  450. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  451. fh->fh_type == FC_TYPE_FCP) {
  452. /* Drop FCP data. We don't handle this in the L2 path */
  453. put_cpu();
  454. kfree_skb(skb);
  455. return;
  456. }
  457. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  458. fh->fh_type == FC_TYPE_ELS) {
  459. switch (fc_frame_payload_op(fp)) {
  460. case ELS_LOGO:
  461. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  462. /* drop non-FIP LOGO */
  463. put_cpu();
  464. kfree_skb(skb);
  465. return;
  466. }
  467. break;
  468. }
  469. }
  470. if (le32_to_cpu(fr_crc(fp)) !=
  471. ~crc32(~0, skb->data, fr_len)) {
  472. if (stats->InvalidCRCCount < 5)
  473. printk(KERN_WARNING PFX "dropping frame with "
  474. "CRC error\n");
  475. stats->InvalidCRCCount++;
  476. put_cpu();
  477. kfree_skb(skb);
  478. return;
  479. }
  480. put_cpu();
  481. fc_exch_recv(lport, fp);
  482. }
  483. /**
  484. * bnx2fc_percpu_io_thread - per-CPU thread for I/Os
  485. *
  486. * @arg: ptr to bnx2fc_percpu_s structure
  487. */
  488. int bnx2fc_percpu_io_thread(void *arg)
  489. {
  490. struct bnx2fc_percpu_s *p = arg;
  491. struct bnx2fc_work *work, *tmp;
  492. LIST_HEAD(work_list);
  493. set_user_nice(current, -20);
  494. set_current_state(TASK_INTERRUPTIBLE);
  495. while (!kthread_should_stop()) {
  496. schedule();
  497. spin_lock_bh(&p->fp_work_lock);
  498. while (!list_empty(&p->work_list)) {
  499. list_splice_init(&p->work_list, &work_list);
  500. spin_unlock_bh(&p->fp_work_lock);
  501. list_for_each_entry_safe(work, tmp, &work_list, list) {
  502. list_del_init(&work->list);
  503. bnx2fc_process_cq_compl(work->tgt, work->wqe);
  504. kfree(work);
  505. }
  506. spin_lock_bh(&p->fp_work_lock);
  507. }
  508. __set_current_state(TASK_INTERRUPTIBLE);
  509. spin_unlock_bh(&p->fp_work_lock);
  510. }
  511. __set_current_state(TASK_RUNNING);
  512. return 0;
  513. }
  514. static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
  515. {
  516. struct fc_host_statistics *bnx2fc_stats;
  517. struct fc_lport *lport = shost_priv(shost);
  518. struct fcoe_port *port = lport_priv(lport);
  519. struct bnx2fc_hba *hba = port->priv;
  520. struct fcoe_statistics_params *fw_stats;
  521. int rc = 0;
  522. fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
  523. if (!fw_stats)
  524. return NULL;
  525. bnx2fc_stats = fc_get_host_stats(shost);
  526. init_completion(&hba->stat_req_done);
  527. if (bnx2fc_send_stat_req(hba))
  528. return bnx2fc_stats;
  529. rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
  530. if (!rc) {
  531. BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
  532. return bnx2fc_stats;
  533. }
  534. bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
  535. bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
  536. bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
  537. bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
  538. bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
  539. bnx2fc_stats->dumped_frames = 0;
  540. bnx2fc_stats->lip_count = 0;
  541. bnx2fc_stats->nos_count = 0;
  542. bnx2fc_stats->loss_of_sync_count = 0;
  543. bnx2fc_stats->loss_of_signal_count = 0;
  544. bnx2fc_stats->prim_seq_protocol_err_count = 0;
  545. return bnx2fc_stats;
  546. }
  547. static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
  548. {
  549. struct fcoe_port *port = lport_priv(lport);
  550. struct bnx2fc_hba *hba = port->priv;
  551. struct Scsi_Host *shost = lport->host;
  552. int rc = 0;
  553. shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
  554. shost->max_lun = BNX2FC_MAX_LUN;
  555. shost->max_id = BNX2FC_MAX_FCP_TGT;
  556. shost->max_channel = 0;
  557. if (lport->vport)
  558. shost->transportt = bnx2fc_vport_xport_template;
  559. else
  560. shost->transportt = bnx2fc_transport_template;
  561. /* Add the new host to SCSI-ml */
  562. rc = scsi_add_host(lport->host, dev);
  563. if (rc) {
  564. printk(KERN_ERR PFX "Error on scsi_add_host\n");
  565. return rc;
  566. }
  567. if (!lport->vport)
  568. fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
  569. sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
  570. BNX2FC_NAME, BNX2FC_VERSION,
  571. hba->netdev->name);
  572. return 0;
  573. }
  574. static void bnx2fc_link_speed_update(struct fc_lport *lport)
  575. {
  576. struct fcoe_port *port = lport_priv(lport);
  577. struct bnx2fc_hba *hba = port->priv;
  578. struct net_device *netdev = hba->netdev;
  579. struct ethtool_cmd ecmd;
  580. if (!dev_ethtool_get_settings(netdev, &ecmd)) {
  581. lport->link_supported_speeds &=
  582. ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
  583. if (ecmd.supported & (SUPPORTED_1000baseT_Half |
  584. SUPPORTED_1000baseT_Full))
  585. lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
  586. if (ecmd.supported & SUPPORTED_10000baseT_Full)
  587. lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
  588. switch (ethtool_cmd_speed(&ecmd)) {
  589. case SPEED_1000:
  590. lport->link_speed = FC_PORTSPEED_1GBIT;
  591. break;
  592. case SPEED_2500:
  593. lport->link_speed = FC_PORTSPEED_2GBIT;
  594. break;
  595. case SPEED_10000:
  596. lport->link_speed = FC_PORTSPEED_10GBIT;
  597. break;
  598. }
  599. }
  600. }
  601. static int bnx2fc_link_ok(struct fc_lport *lport)
  602. {
  603. struct fcoe_port *port = lport_priv(lport);
  604. struct bnx2fc_hba *hba = port->priv;
  605. struct net_device *dev = hba->phys_dev;
  606. int rc = 0;
  607. if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
  608. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  609. else {
  610. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  611. rc = -1;
  612. }
  613. return rc;
  614. }
  615. /**
  616. * bnx2fc_get_link_state - get network link state
  617. *
  618. * @hba: adapter instance pointer
  619. *
  620. * updates adapter structure flag based on netdev state
  621. */
  622. void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
  623. {
  624. if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
  625. set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  626. else
  627. clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  628. }
  629. static int bnx2fc_net_config(struct fc_lport *lport)
  630. {
  631. struct bnx2fc_hba *hba;
  632. struct fcoe_port *port;
  633. u64 wwnn, wwpn;
  634. port = lport_priv(lport);
  635. hba = port->priv;
  636. /* require support for get_pauseparam ethtool op. */
  637. if (!hba->phys_dev->ethtool_ops ||
  638. !hba->phys_dev->ethtool_ops->get_pauseparam)
  639. return -EOPNOTSUPP;
  640. if (fc_set_mfs(lport, BNX2FC_MFS))
  641. return -EINVAL;
  642. skb_queue_head_init(&port->fcoe_pending_queue);
  643. port->fcoe_pending_queue_active = 0;
  644. setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
  645. bnx2fc_link_speed_update(lport);
  646. if (!lport->vport) {
  647. wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
  648. BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
  649. fc_set_wwnn(lport, wwnn);
  650. wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
  651. BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
  652. fc_set_wwpn(lport, wwpn);
  653. }
  654. return 0;
  655. }
  656. static void bnx2fc_destroy_timer(unsigned long data)
  657. {
  658. struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
  659. BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
  660. "Destroy compl not received!!\n");
  661. hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
  662. wake_up_interruptible(&hba->destroy_wait);
  663. }
  664. /**
  665. * bnx2fc_indicate_netevent - Generic netdev event handler
  666. *
  667. * @context: adapter structure pointer
  668. * @event: event type
  669. * @vlan_id: vlan id - associated vlan id with this event
  670. *
  671. * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
  672. * NETDEV_CHANGE_MTU events
  673. */
  674. static void bnx2fc_indicate_netevent(void *context, unsigned long event,
  675. u16 vlan_id)
  676. {
  677. struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
  678. struct fc_lport *lport = hba->ctlr.lp;
  679. struct fc_lport *vport;
  680. u32 link_possible = 1;
  681. /* Ignore vlans for now */
  682. if (vlan_id != 0)
  683. return;
  684. if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
  685. BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
  686. hba->netdev->name, event);
  687. return;
  688. }
  689. /*
  690. * ASSUMPTION:
  691. * indicate_netevent cannot be called from cnic unless bnx2fc
  692. * does register_device
  693. */
  694. BUG_ON(!lport);
  695. BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
  696. hba->netdev->name, event);
  697. switch (event) {
  698. case NETDEV_UP:
  699. BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
  700. hba->adapter_state);
  701. if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
  702. printk(KERN_ERR "indicate_netevent: "\
  703. "adapter is not UP!!\n");
  704. break;
  705. case NETDEV_DOWN:
  706. BNX2FC_HBA_DBG(lport, "Port down\n");
  707. clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  708. clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
  709. link_possible = 0;
  710. break;
  711. case NETDEV_GOING_DOWN:
  712. BNX2FC_HBA_DBG(lport, "Port going down\n");
  713. set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
  714. link_possible = 0;
  715. break;
  716. case NETDEV_CHANGE:
  717. BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
  718. break;
  719. default:
  720. printk(KERN_ERR PFX "Unknown netevent %ld", event);
  721. return;
  722. }
  723. bnx2fc_link_speed_update(lport);
  724. if (link_possible && !bnx2fc_link_ok(lport)) {
  725. printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
  726. fcoe_ctlr_link_up(&hba->ctlr);
  727. } else {
  728. printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
  729. if (fcoe_ctlr_link_down(&hba->ctlr)) {
  730. clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
  731. mutex_lock(&lport->lp_mutex);
  732. list_for_each_entry(vport, &lport->vports, list)
  733. fc_host_port_type(vport->host) =
  734. FC_PORTTYPE_UNKNOWN;
  735. mutex_unlock(&lport->lp_mutex);
  736. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  737. per_cpu_ptr(lport->dev_stats,
  738. get_cpu())->LinkFailureCount++;
  739. put_cpu();
  740. fcoe_clean_pending_queue(lport);
  741. init_waitqueue_head(&hba->shutdown_wait);
  742. BNX2FC_HBA_DBG(lport, "indicate_netevent "
  743. "num_ofld_sess = %d\n",
  744. hba->num_ofld_sess);
  745. hba->wait_for_link_down = 1;
  746. BNX2FC_HBA_DBG(lport, "waiting for uploads to "
  747. "compl proc = %s\n",
  748. current->comm);
  749. wait_event_interruptible(hba->shutdown_wait,
  750. (hba->num_ofld_sess == 0));
  751. BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
  752. hba->num_ofld_sess);
  753. hba->wait_for_link_down = 0;
  754. if (signal_pending(current))
  755. flush_signals(current);
  756. }
  757. }
  758. }
  759. static int bnx2fc_libfc_config(struct fc_lport *lport)
  760. {
  761. /* Set the function pointers set by bnx2fc driver */
  762. memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
  763. sizeof(struct libfc_function_template));
  764. fc_elsct_init(lport);
  765. fc_exch_init(lport);
  766. fc_rport_init(lport);
  767. fc_disc_init(lport);
  768. return 0;
  769. }
  770. static int bnx2fc_em_config(struct fc_lport *lport)
  771. {
  772. struct fcoe_port *port = lport_priv(lport);
  773. struct bnx2fc_hba *hba = port->priv;
  774. if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
  775. FCOE_MAX_XID, NULL)) {
  776. printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
  777. return -ENOMEM;
  778. }
  779. hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
  780. BNX2FC_MAX_XID);
  781. if (!hba->cmd_mgr) {
  782. printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
  783. fc_exch_mgr_free(lport);
  784. return -ENOMEM;
  785. }
  786. return 0;
  787. }
  788. static int bnx2fc_lport_config(struct fc_lport *lport)
  789. {
  790. lport->link_up = 0;
  791. lport->qfull = 0;
  792. lport->max_retry_count = 3;
  793. lport->max_rport_retry_count = 3;
  794. lport->e_d_tov = 2 * 1000;
  795. lport->r_a_tov = 10 * 1000;
  796. /* REVISIT: enable when supporting tape devices
  797. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  798. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  799. */
  800. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
  801. lport->does_npiv = 1;
  802. memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
  803. lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
  804. /* alloc stats structure */
  805. if (fc_lport_init_stats(lport))
  806. return -ENOMEM;
  807. /* Finish fc_lport configuration */
  808. fc_lport_config(lport);
  809. return 0;
  810. }
  811. /**
  812. * bnx2fc_fip_recv - handle a received FIP frame.
  813. *
  814. * @skb: the received skb
  815. * @dev: associated &net_device
  816. * @ptype: the &packet_type structure which was used to register this handler.
  817. * @orig_dev: original receive &net_device, in case @dev is a bond.
  818. *
  819. * Returns: 0 for success
  820. */
  821. static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
  822. struct packet_type *ptype,
  823. struct net_device *orig_dev)
  824. {
  825. struct bnx2fc_hba *hba;
  826. hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
  827. fcoe_ctlr_recv(&hba->ctlr, skb);
  828. return 0;
  829. }
  830. /**
  831. * bnx2fc_update_src_mac - Update the FCoE data source MAC address.
  832. *
  833. * @lport: libfc local port
  834. * @addr: new unicast MAC address to use as the data source address
  835. *
  836. * Stores the new unicast MAC address as the port's FCoE data source
  837. * address; this implementation does not program any Ethernet MAC
  838. * filters.
  839. */
  840. static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
  841. {
  842. struct fcoe_port *port = lport_priv(lport);
  843. memcpy(port->data_src_addr, addr, ETH_ALEN);
  844. }
  845. /**
  846. * bnx2fc_get_src_mac - return the ethernet source address for an lport
  847. *
  848. * @lport: libfc port
  849. */
  850. static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
  851. {
  852. struct fcoe_port *port;
  853. port = (struct fcoe_port *)lport_priv(lport);
  854. return port->data_src_addr;
  855. }
  856. /**
  857. * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
  858. *
  859. * @fip: FCoE controller.
  860. * @skb: FIP Packet.
  861. */
  862. static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  863. {
  864. skb->dev = bnx2fc_from_ctlr(fip)->netdev;
  865. dev_queue_xmit(skb);
  866. }
  867. static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
  868. {
  869. struct Scsi_Host *shost = vport_to_shost(vport);
  870. struct fc_lport *n_port = shost_priv(shost);
  871. struct fcoe_port *port = lport_priv(n_port);
  872. struct bnx2fc_hba *hba = port->priv;
  873. struct net_device *netdev = hba->netdev;
  874. struct fc_lport *vn_port;
  875. if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
  876. printk(KERN_ERR PFX "vn ports cannot be created on "
  877. "this hba\n");
  878. return -EIO;
  879. }
  880. mutex_lock(&bnx2fc_dev_lock);
  881. vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
  882. mutex_unlock(&bnx2fc_dev_lock);
  883. if (IS_ERR(vn_port)) {
  884. printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
  885. netdev->name);
  886. return -EIO;
  887. }
  888. if (disabled) {
  889. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  890. } else {
  891. vn_port->boot_time = jiffies;
  892. fc_lport_init(vn_port);
  893. fc_fabric_login(vn_port);
  894. fc_vport_setlink(vn_port);
  895. }
  896. return 0;
  897. }
  898. static int bnx2fc_vport_destroy(struct fc_vport *vport)
  899. {
  900. struct Scsi_Host *shost = vport_to_shost(vport);
  901. struct fc_lport *n_port = shost_priv(shost);
  902. struct fc_lport *vn_port = vport->dd_data;
  903. struct fcoe_port *port = lport_priv(vn_port);
  904. mutex_lock(&n_port->lp_mutex);
  905. list_del(&vn_port->list);
  906. mutex_unlock(&n_port->lp_mutex);
  907. queue_work(bnx2fc_wq, &port->destroy_work);
  908. return 0;
  909. }
  910. static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
  911. {
  912. struct fc_lport *lport = vport->dd_data;
  913. if (disable) {
  914. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  915. fc_fabric_logoff(lport);
  916. } else {
  917. lport->boot_time = jiffies;
  918. fc_fabric_login(lport);
  919. fc_vport_setlink(lport);
  920. }
  921. return 0;
  922. }
  923. static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
  924. {
  925. struct net_device *netdev = hba->netdev;
  926. struct net_device *physdev = hba->phys_dev;
  927. struct netdev_hw_addr *ha;
  928. int sel_san_mac = 0;
  929. /* setup Source MAC Address */
  930. rcu_read_lock();
  931. for_each_dev_addr(physdev, ha) {
  932. BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
  933. ha->type);
  934. printk(KERN_INFO "%02x:%02x:%02x:%02x:%02x:%02x\n", ha->addr[0],
  935. ha->addr[1], ha->addr[2], ha->addr[3],
  936. ha->addr[4], ha->addr[5]);
  937. if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
  938. (is_valid_ether_addr(ha->addr))) {
  939. memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
  940. sel_san_mac = 1;
  941. BNX2FC_MISC_DBG("Found SAN MAC\n");
  942. }
  943. }
  944. rcu_read_unlock();
  945. if (!sel_san_mac)
  946. return -ENODEV;
  947. hba->fip_packet_type.func = bnx2fc_fip_recv;
  948. hba->fip_packet_type.type = htons(ETH_P_FIP);
  949. hba->fip_packet_type.dev = netdev;
  950. dev_add_pack(&hba->fip_packet_type);
  951. hba->fcoe_packet_type.func = bnx2fc_rcv;
  952. hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
  953. hba->fcoe_packet_type.dev = netdev;
  954. dev_add_pack(&hba->fcoe_packet_type);
  955. return 0;
  956. }
  957. static int bnx2fc_attach_transport(void)
  958. {
  959. bnx2fc_transport_template =
  960. fc_attach_transport(&bnx2fc_transport_function);
  961. if (bnx2fc_transport_template == NULL) {
  962. printk(KERN_ERR PFX "Failed to attach FC transport\n");
  963. return -ENODEV;
  964. }
  965. bnx2fc_vport_xport_template =
  966. fc_attach_transport(&bnx2fc_vport_xport_function);
  967. if (bnx2fc_vport_xport_template == NULL) {
  968. printk(KERN_ERR PFX
  969. "Failed to attach FC transport for vport\n");
  970. fc_release_transport(bnx2fc_transport_template);
  971. bnx2fc_transport_template = NULL;
  972. return -ENODEV;
  973. }
  974. return 0;
  975. }
  976. static void bnx2fc_release_transport(void)
  977. {
  978. fc_release_transport(bnx2fc_transport_template);
  979. fc_release_transport(bnx2fc_vport_xport_template);
  980. bnx2fc_transport_template = NULL;
  981. bnx2fc_vport_xport_template = NULL;
  982. }
  983. static void bnx2fc_interface_release(struct kref *kref)
  984. {
  985. struct bnx2fc_hba *hba;
  986. struct net_device *netdev;
  987. struct net_device *phys_dev;
  988. hba = container_of(kref, struct bnx2fc_hba, kref);
  989. BNX2FC_MISC_DBG("Interface is being released\n");
  990. netdev = hba->netdev;
  991. phys_dev = hba->phys_dev;
  992. /* tear-down FIP controller */
  993. if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
  994. fcoe_ctlr_destroy(&hba->ctlr);
  995. /* Free the command manager */
  996. if (hba->cmd_mgr) {
  997. bnx2fc_cmd_mgr_free(hba->cmd_mgr);
  998. hba->cmd_mgr = NULL;
  999. }
  1000. dev_put(netdev);
  1001. module_put(THIS_MODULE);
  1002. }
  1003. static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
  1004. {
  1005. kref_get(&hba->kref);
  1006. }
  1007. static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
  1008. {
  1009. kref_put(&hba->kref, bnx2fc_interface_release);
  1010. }
  1011. static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
  1012. {
  1013. bnx2fc_unbind_pcidev(hba);
  1014. kfree(hba);
  1015. }
  1016. /**
  1017. * bnx2fc_interface_create - create a new fcoe instance
  1018. *
  1019. * @cnic: pointer to cnic device
  1020. *
  1021. * Creates a new FCoE instance on the given device, which includes allocating
  1022. * hba structure, scsi_host and lport structures.
  1023. */
  1024. static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
  1025. {
  1026. struct bnx2fc_hba *hba;
  1027. int rc;
  1028. hba = kzalloc(sizeof(*hba), GFP_KERNEL);
  1029. if (!hba) {
  1030. printk(KERN_ERR PFX "Unable to allocate hba structure\n");
  1031. return NULL;
  1032. }
  1033. spin_lock_init(&hba->hba_lock);
  1034. mutex_init(&hba->hba_mutex);
  1035. hba->cnic = cnic;
  1036. rc = bnx2fc_bind_pcidev(hba);
  1037. if (rc)
  1038. goto bind_err;
  1039. hba->phys_dev = cnic->netdev;
  1040. /* will get overwritten after we do vlan discovery */
  1041. hba->netdev = hba->phys_dev;
  1042. init_waitqueue_head(&hba->shutdown_wait);
  1043. init_waitqueue_head(&hba->destroy_wait);
  1044. return hba;
  1045. bind_err:
  1046. printk(KERN_ERR PFX "create_interface: bind error\n");
  1047. kfree(hba);
  1048. return NULL;
  1049. }
  1050. static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
  1051. enum fip_state fip_mode)
  1052. {
  1053. int rc = 0;
  1054. struct net_device *netdev = hba->netdev;
  1055. struct fcoe_ctlr *fip = &hba->ctlr;
  1056. dev_hold(netdev);
  1057. kref_init(&hba->kref);
  1058. hba->flags = 0;
  1059. /* Initialize FIP */
  1060. memset(fip, 0, sizeof(*fip));
  1061. fcoe_ctlr_init(fip, fip_mode);
  1062. hba->ctlr.send = bnx2fc_fip_send;
  1063. hba->ctlr.update_mac = bnx2fc_update_src_mac;
  1064. hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
  1065. set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
  1066. INIT_LIST_HEAD(&hba->vports);
  1067. rc = bnx2fc_netdev_setup(hba);
  1068. if (rc)
  1069. goto setup_err;
  1070. hba->next_conn_id = 0;
  1071. memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
  1072. hba->num_ofld_sess = 0;
  1073. return 0;
  1074. setup_err:
  1075. fcoe_ctlr_destroy(&hba->ctlr);
  1076. dev_put(netdev);
  1077. bnx2fc_interface_put(hba);
  1078. return rc;
  1079. }
  1080. /**
  1081. * bnx2fc_if_create - Create FCoE instance on a given interface
  1082. *
  1083. * @hba: FCoE interface to create a local port on
  1084. * @parent: Device pointer to be the parent in sysfs for the SCSI host
  1085. * @npiv: Indicates whether the port is a vport or not
  1086. *
  1087. * Creates a fc_lport instance and a Scsi_Host instance and configures them.
  1088. *
  1089. * Returns: Allocated fc_lport or an error pointer
  1090. */
  1091. static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
  1092. struct device *parent, int npiv)
  1093. {
  1094. struct fc_lport *lport, *n_port;
  1095. struct fcoe_port *port;
  1096. struct Scsi_Host *shost;
  1097. struct fc_vport *vport = dev_to_vport(parent);
  1098. struct bnx2fc_lport *blport;
  1099. int rc = 0;
  1100. blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
  1101. if (!blport) {
  1102. BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
  1103. return NULL;
  1104. }
  1105. /* Allocate Scsi_Host structure */
  1106. if (!npiv)
  1107. lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
  1108. else
  1109. lport = libfc_vport_create(vport, sizeof(*port));
  1110. if (!lport) {
  1111. printk(KERN_ERR PFX "could not allocate scsi host structure\n");
  1112. goto free_blport;
  1113. }
  1114. shost = lport->host;
  1115. port = lport_priv(lport);
  1116. port->lport = lport;
  1117. port->priv = hba;
  1118. INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
  1119. /* Configure fcoe_port */
  1120. rc = bnx2fc_lport_config(lport);
  1121. if (rc)
  1122. goto lp_config_err;
  1123. if (npiv) {
  1124. printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
  1125. vport->node_name, vport->port_name);
  1126. fc_set_wwnn(lport, vport->node_name);
  1127. fc_set_wwpn(lport, vport->port_name);
  1128. }
  1129. /* Configure netdev and networking properties of the lport */
  1130. rc = bnx2fc_net_config(lport);
  1131. if (rc) {
  1132. printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
  1133. goto lp_config_err;
  1134. }
  1135. rc = bnx2fc_shost_config(lport, parent);
  1136. if (rc) {
  1137. printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
  1138. hba->netdev->name);
  1139. goto lp_config_err;
  1140. }
  1141. /* Initialize the libfc library */
  1142. rc = bnx2fc_libfc_config(lport);
  1143. if (rc) {
  1144. printk(KERN_ERR PFX "Couldn't configure libfc\n");
  1145. goto shost_err;
  1146. }
  1147. fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
  1148. /* Allocate exchange manager */
  1149. if (!npiv)
  1150. rc = bnx2fc_em_config(lport);
  1151. else {
  1152. shost = vport_to_shost(vport);
  1153. n_port = shost_priv(shost);
  1154. rc = fc_exch_mgr_list_clone(n_port, lport);
  1155. }
  1156. if (rc) {
  1157. printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
  1158. goto shost_err;
  1159. }
  1160. bnx2fc_interface_get(hba);
  1161. spin_lock_bh(&hba->hba_lock);
  1162. blport->lport = lport;
  1163. list_add_tail(&blport->list, &hba->vports);
  1164. spin_unlock_bh(&hba->hba_lock);
  1165. return lport;
  1166. shost_err:
  1167. scsi_remove_host(shost);
  1168. lp_config_err:
  1169. scsi_host_put(lport->host);
  1170. free_blport:
  1171. kfree(blport);
  1172. return NULL;
  1173. }
  1174. static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
  1175. {
  1176. /* Don't listen for Ethernet packets anymore */
  1177. __dev_remove_pack(&hba->fcoe_packet_type);
  1178. __dev_remove_pack(&hba->fip_packet_type);
  1179. synchronize_net();
  1180. }
  1181. static void bnx2fc_if_destroy(struct fc_lport *lport)
  1182. {
  1183. struct fcoe_port *port = lport_priv(lport);
  1184. struct bnx2fc_hba *hba = port->priv;
  1185. struct bnx2fc_lport *blport, *tmp;
  1186. BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
  1187. /* Stop the transmit retry timer */
  1188. del_timer_sync(&port->timer);
  1189. /* Free existing transmit skbs */
  1190. fcoe_clean_pending_queue(lport);
  1191. /* Free queued packets for the receive thread */
  1192. bnx2fc_clean_rx_queue(lport);
  1193. /* Detach from scsi-ml */
  1194. fc_remove_host(lport->host);
  1195. scsi_remove_host(lport->host);
  1196. /*
  1197. * Note that only the physical lport will have the exchange manager.
  1198. * For vports, this call is a no-op.
  1199. */
  1200. fc_exch_mgr_free(lport);
  1201. /* Free memory used by statistical counters */
  1202. fc_lport_free_stats(lport);
  1203. spin_lock_bh(&hba->hba_lock);
  1204. list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
  1205. if (blport->lport == lport) {
  1206. list_del(&blport->list);
  1207. kfree(blport);
  1208. }
  1209. }
  1210. spin_unlock_bh(&hba->hba_lock);
  1211. /* Release Scsi_Host */
  1212. scsi_host_put(lport->host);
  1213. bnx2fc_interface_put(hba);
  1214. }
  1215. /**
  1216. * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
  1217. *
  1218. * @netdev: The net device corresponding to the FCoE interface
  1219. * to be destroyed
  1220. *
  1221. * Called from sysfs.
  1222. *
  1223. * Returns: 0 for success
  1224. */
  1225. static int bnx2fc_destroy(struct net_device *netdev)
  1226. {
  1227. struct bnx2fc_hba *hba = NULL;
  1228. struct net_device *phys_dev;
  1229. int rc = 0;
  1230. rtnl_lock();
  1231. mutex_lock(&bnx2fc_dev_lock);
  1232. /* obtain physical netdev */
  1233. if (netdev->priv_flags & IFF_802_1Q_VLAN)
  1234. phys_dev = vlan_dev_real_dev(netdev);
  1235. else {
  1236. printk(KERN_ERR PFX "Not a vlan device\n");
  1237. rc = -ENODEV;
  1238. goto netdev_err;
  1239. }
  1240. hba = bnx2fc_hba_lookup(phys_dev);
  1241. if (!hba || !hba->ctlr.lp) {
  1242. rc = -ENODEV;
  1243. printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
  1244. goto netdev_err;
  1245. }
  1246. if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
  1247. printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
  1248. goto netdev_err;
  1249. }
  1250. bnx2fc_netdev_cleanup(hba);
  1251. bnx2fc_stop(hba);
  1252. bnx2fc_if_destroy(hba->ctlr.lp);
  1253. destroy_workqueue(hba->timer_work_queue);
  1254. if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
  1255. bnx2fc_fw_destroy(hba);
  1256. clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
  1257. netdev_err:
  1258. mutex_unlock(&bnx2fc_dev_lock);
  1259. rtnl_unlock();
  1260. return rc;
  1261. }
  1262. static void bnx2fc_destroy_work(struct work_struct *work)
  1263. {
  1264. struct fcoe_port *port;
  1265. struct fc_lport *lport;
  1266. port = container_of(work, struct fcoe_port, destroy_work);
  1267. lport = port->lport;
  1268. BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
  1269. bnx2fc_port_shutdown(lport);
  1270. rtnl_lock();
  1271. mutex_lock(&bnx2fc_dev_lock);
  1272. bnx2fc_if_destroy(lport);
  1273. mutex_unlock(&bnx2fc_dev_lock);
  1274. rtnl_unlock();
  1275. }
  1276. static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
  1277. {
  1278. bnx2fc_free_fw_resc(hba);
  1279. bnx2fc_free_task_ctx(hba);
  1280. }
  1281. /**
  1282. * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated
  1283. * pci structure
  1284. *
  1285. * @hba: Adapter instance
  1286. */
  1287. static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
  1288. {
  1289. if (bnx2fc_setup_task_ctx(hba))
  1290. goto mem_err;
  1291. if (bnx2fc_setup_fw_resc(hba))
  1292. goto mem_err;
  1293. return 0;
  1294. mem_err:
  1295. bnx2fc_unbind_adapter_devices(hba);
  1296. return -ENOMEM;
  1297. }
  1298. static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
  1299. {
  1300. struct cnic_dev *cnic;
  1301. if (!hba->cnic) {
  1302. printk(KERN_ERR PFX "cnic is NULL\n");
  1303. return -ENODEV;
  1304. }
  1305. cnic = hba->cnic;
  1306. hba->pcidev = cnic->pcidev;
  1307. if (hba->pcidev)
  1308. pci_dev_get(hba->pcidev);
  1309. return 0;
  1310. }
  1311. static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
  1312. {
  1313. if (hba->pcidev)
  1314. pci_dev_put(hba->pcidev);
  1315. hba->pcidev = NULL;
  1316. }
  1317. /**
  1318. * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
  1319. *
  1320. * @handle: transport handle pointing to adapter structure
  1321. *
  1322. * This function maps adapter structure to pcidev structure and initiates
  1323. * firmware handshake to enable/initialize on-chip FCoE components.
  1324. * This bnx2fc-cnic interface API callback is used after the following
  1325. * conditions are met:
  1326. * a) the underlying network interface is up (marked by the NETDEV_UP
  1327. * event from the netdev)
  1328. * b) the bnx2fc adapter structure is registered.
  1329. */
static void bnx2fc_ulp_start(void *handle)
{
        struct bnx2fc_hba *hba = handle;
        struct fc_lport *lport = hba->ctlr.lp;

        BNX2FC_MISC_DBG("Entered %s\n", __func__);
        mutex_lock(&bnx2fc_dev_lock);

        if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
                goto start_disc;

        if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
                bnx2fc_fw_init(hba);

start_disc:
        mutex_unlock(&bnx2fc_dev_lock);

        BNX2FC_MISC_DBG("bnx2fc started.\n");

        /* Kick off Fabric discovery */
        if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
                printk(KERN_ERR PFX "ulp_init: start discovery\n");
                lport->tt.frame_send = bnx2fc_xmit;
                bnx2fc_start_disc(hba);
        }
}

static void bnx2fc_port_shutdown(struct fc_lport *lport)
{
        BNX2FC_MISC_DBG("Entered %s\n", __func__);
        fc_fabric_logoff(lport);
        fc_lport_destroy(lport);
}

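/*
 * bnx2fc_stop - quiesce a started adapter instance
 *
 * Shuts the lport down, waits for all offloaded sessions to be uploaded,
 * marks the physical host and its vports as FC_PORTTYPE_UNKNOWN, signals
 * link down to the FIP controller and clears the adapter state bits.
 */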
static void bnx2fc_stop(struct bnx2fc_hba *hba)
{
        struct fc_lport *lport;
        struct fc_lport *vport;

        BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
                        hba->init_done);
        if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
            test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
                lport = hba->ctlr.lp;
                bnx2fc_port_shutdown(lport);
                BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
                               "offloaded sessions\n",
                               hba->num_ofld_sess);
                wait_event_interruptible(hba->shutdown_wait,
                                         (hba->num_ofld_sess == 0));

                mutex_lock(&lport->lp_mutex);
                list_for_each_entry(vport, &lport->vports, list)
                        fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
                mutex_unlock(&lport->lp_mutex);
                fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
                fcoe_ctlr_link_down(&hba->ctlr);
                fcoe_clean_pending_queue(lport);

                mutex_lock(&hba->hba_mutex);
                clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
                clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
                clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
                mutex_unlock(&hba->hba_mutex);
        }
}

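/*
 * bnx2fc_fw_init - bring up the on-chip FCoE function
 *
 * Allocates task context and firmware resources, sends the FCoE INIT
 * message and then polls for up to about one second (HZ iterations of
 * BNX2FC_INIT_POLL_TIME) for the adapter to report ADAPTER_STATE_UP.
 * Sets BNX2FC_FW_INIT_DONE on success.
 */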
static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
{
#define BNX2FC_INIT_POLL_TIME          (1000 / HZ)
        int rc = -1;
        int i = HZ;

        rc = bnx2fc_bind_adapter_devices(hba);
        if (rc) {
                printk(KERN_ALERT PFX
                       "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
                goto err_out;
        }

        rc = bnx2fc_send_fw_fcoe_init_msg(hba);
        if (rc) {
                printk(KERN_ALERT PFX
                       "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
                goto err_unbind;
        }

        /*
         * Wait until the adapter init message is complete, and adapter
         * state is UP.
         */
        while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
                msleep(BNX2FC_INIT_POLL_TIME);

        if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
                printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
                       "Ignoring...\n",
                       hba->cnic->netdev->name);
                rc = -1;
                goto err_unbind;
        }

        /* Mark HBA to indicate that the FW INIT is done */
        set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
        return 0;

err_unbind:
        bnx2fc_unbind_adapter_devices(hba);
err_out:
        return rc;
}

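/*
 * bnx2fc_fw_destroy - tear down the on-chip FCoE function
 *
 * Sends the FCoE DESTROY message, arms a timeout timer and waits for the
 * firmware to signal BNX2FC_FLAG_DESTROY_CMPL before releasing the task
 * context and firmware resources.
 */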
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
{
        if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
                if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
                        init_timer(&hba->destroy_timer);
                        hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
                                                     jiffies;
                        hba->destroy_timer.function = bnx2fc_destroy_timer;
                        hba->destroy_timer.data = (unsigned long)hba;
                        add_timer(&hba->destroy_timer);
                        wait_event_interruptible(hba->destroy_wait,
                                                 (hba->flags &
                                                  BNX2FC_FLAG_DESTROY_CMPL));
                        /* This should never happen */
                        if (signal_pending(current))
                                flush_signals(current);

                        del_timer_sync(&hba->destroy_timer);
                }
                bnx2fc_unbind_adapter_devices(hba);
        }
}

/**
 * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
 *
 * @handle: transport handle pointing to adapter structure
 *
 * Stops the adapter and destroys the firmware FCoE function, serialized
 * by bnx2fc_dev_lock.
 */
static void bnx2fc_ulp_stop(void *handle)
{
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;

        printk(KERN_ERR "ULP_STOP\n");

        mutex_lock(&bnx2fc_dev_lock);
        bnx2fc_stop(hba);
        bnx2fc_fw_destroy(hba);
        mutex_unlock(&bnx2fc_dev_lock);
}

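/*
 * bnx2fc_start_disc - kick off FIP and fabric discovery
 *
 * Signals link up to the FIP controller when the physical link is good,
 * waits up to roughly 3 seconds for an FCF to be selected, and then
 * (re)initializes the lport and issues the fabric login.
 */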
static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
{
        struct fc_lport *lport;
        int wait_cnt = 0;

        BNX2FC_MISC_DBG("Entered %s\n", __func__);
        /* Kick off FIP/FLOGI */
        if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
                printk(KERN_ERR PFX "Init not done yet\n");
                return;
        }

        lport = hba->ctlr.lp;
        BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");

        if (!bnx2fc_link_ok(lport)) {
                BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
                fcoe_ctlr_link_up(&hba->ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
        }

        /* wait for the FCF to be selected before issuing FLOGI */
        while (!hba->ctlr.sel_fcf) {
                msleep(250);
                /* give up after 3 secs */
                if (++wait_cnt > 12)
                        break;
        }
        fc_lport_init(lport);
        fc_fabric_login(lport);
}

/**
 * bnx2fc_ulp_init - Initialize an adapter instance
 *
 * @dev: cnic device handle
 *
 * Called from cnic_register_driver() context to initialize all
 * enumerated cnic devices. This routine allocates the adapter structure
 * and other device specific resources.
 */
static void bnx2fc_ulp_init(struct cnic_dev *dev)
{
        struct bnx2fc_hba *hba;
        int rc = 0;

        BNX2FC_MISC_DBG("Entered %s\n", __func__);
        /* bnx2fc works only when bnx2x is loaded */
        if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
                       " flags: %lx\n",
                       dev->netdev->name, dev->flags);
                return;
        }

        /* Configure FCoE interface */
        hba = bnx2fc_interface_create(dev);
        if (!hba) {
                printk(KERN_ERR PFX "hba initialization failed\n");
                return;
        }

        /* Add HBA to the adapter list */
        mutex_lock(&bnx2fc_dev_lock);
        list_add_tail(&hba->link, &adapter_list);
        adapter_count++;
        mutex_unlock(&bnx2fc_dev_lock);

        clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
        rc = dev->register_device(dev, CNIC_ULP_FCOE,
                                  (void *) hba);
        if (rc)
                printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
        else
                set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}

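/*
 * bnx2fc_disable - libfcoe transport 'disable' callback
 *
 * Validates that the VLAN netdev sits on a bnx2x (NetXtreme II) device,
 * looks up the corresponding hba and takes the FIP controller link down,
 * flushing any pending FCoE frames.
 */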
static int bnx2fc_disable(struct net_device *netdev)
{
        struct bnx2fc_hba *hba;
        struct net_device *phys_dev;
        struct ethtool_drvinfo drvinfo;
        int rc = 0;

        rtnl_lock();

        mutex_lock(&bnx2fc_dev_lock);

        /* obtain physical netdev */
        if (netdev->priv_flags & IFF_802_1Q_VLAN)
                phys_dev = vlan_dev_real_dev(netdev);
        else {
                printk(KERN_ERR PFX "Not a vlan device\n");
                rc = -ENODEV;
                goto nodev;
        }

        /* verify if the physical device is a netxtreme2 device */
        if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
                memset(&drvinfo, 0, sizeof(drvinfo));
                phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
                if (strcmp(drvinfo.driver, "bnx2x")) {
                        printk(KERN_ERR PFX "Not a netxtreme2 device\n");
                        rc = -ENODEV;
                        goto nodev;
                }
        } else {
                printk(KERN_ERR PFX "unable to obtain drv_info\n");
                rc = -ENODEV;
                goto nodev;
        }

        printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");

        /* obtain hba and initialize rest of the structure */
        hba = bnx2fc_hba_lookup(phys_dev);
        if (!hba || !hba->ctlr.lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
        } else {
                fcoe_ctlr_link_down(&hba->ctlr);
                fcoe_clean_pending_queue(hba->ctlr.lp);
        }

nodev:
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();
        return rc;
}

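/*
 * bnx2fc_enable - libfcoe transport 'enable' callback
 *
 * Mirror of bnx2fc_disable(): after the same VLAN/bnx2x checks it signals
 * link up to the FIP controller, provided the physical link is good.
 */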
static int bnx2fc_enable(struct net_device *netdev)
{
        struct bnx2fc_hba *hba;
        struct net_device *phys_dev;
        struct ethtool_drvinfo drvinfo;
        int rc = 0;

        rtnl_lock();

        BNX2FC_MISC_DBG("Entered %s\n", __func__);
        mutex_lock(&bnx2fc_dev_lock);

        /* obtain physical netdev */
        if (netdev->priv_flags & IFF_802_1Q_VLAN)
                phys_dev = vlan_dev_real_dev(netdev);
        else {
                printk(KERN_ERR PFX "Not a vlan device\n");
                rc = -ENODEV;
                goto nodev;
        }

        /* verify if the physical device is a netxtreme2 device */
        if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
                memset(&drvinfo, 0, sizeof(drvinfo));
                phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
                if (strcmp(drvinfo.driver, "bnx2x")) {
                        printk(KERN_ERR PFX "Not a netxtreme2 device\n");
                        rc = -ENODEV;
                        goto nodev;
                }
        } else {
                printk(KERN_ERR PFX "unable to obtain drv_info\n");
                rc = -ENODEV;
                goto nodev;
        }

        /* obtain hba and initialize rest of the structure */
        hba = bnx2fc_hba_lookup(phys_dev);
        if (!hba || !hba->ctlr.lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
        } else if (!bnx2fc_link_ok(hba->ctlr.lp))
                fcoe_ctlr_link_up(&hba->ctlr);

nodev:
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();
        return rc;
}

/**
 * bnx2fc_create - Create bnx2fc FCoE interface
 *
 * @netdev:   The net_device (VLAN device) to create the FCoE interface on
 * @fip_mode: The FIP mode required for this interface
 *
 * Called from sysfs.
 *
 * Returns: 0 for success
 */
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
        struct bnx2fc_hba *hba;
        struct net_device *phys_dev;
        struct fc_lport *lport;
        struct ethtool_drvinfo drvinfo;
        int rc = 0;
        int vlan_id;

        BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
        if (fip_mode != FIP_MODE_FABRIC) {
                printk(KERN_ERR "fip mode not FABRIC\n");
                return -EIO;
        }

        rtnl_lock();

        mutex_lock(&bnx2fc_dev_lock);

        if (!try_module_get(THIS_MODULE)) {
                rc = -EINVAL;
                goto mod_err;
        }

        /* obtain physical netdev */
        if (netdev->priv_flags & IFF_802_1Q_VLAN) {
                phys_dev = vlan_dev_real_dev(netdev);
                vlan_id = vlan_dev_vlan_id(netdev);
        } else {
                printk(KERN_ERR PFX "Not a vlan device\n");
                rc = -EINVAL;
                goto netdev_err;
        }

        /* verify if the physical device is a netxtreme2 device */
        if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
                memset(&drvinfo, 0, sizeof(drvinfo));
                phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
                if (strcmp(drvinfo.driver, "bnx2x")) {
                        printk(KERN_ERR PFX "Not a netxtreme2 device\n");
                        rc = -EINVAL;
                        goto netdev_err;
                }
        } else {
                printk(KERN_ERR PFX "unable to obtain drv_info\n");
                rc = -EINVAL;
                goto netdev_err;
        }

        /* obtain hba and initialize rest of the structure */
        hba = bnx2fc_hba_lookup(phys_dev);
        if (!hba) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
                goto netdev_err;
        }

        if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
                rc = bnx2fc_fw_init(hba);
                if (rc)
                        goto netdev_err;
        }

        if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
                rc = -EEXIST;
                goto netdev_err;
        }

        /* update netdev with vlan netdev */
        hba->netdev = netdev;
        hba->vlan_id = vlan_id;
        hba->vlan_enabled = 1;

        rc = bnx2fc_interface_setup(hba, fip_mode);
        if (rc) {
                printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
                goto ifput_err;
        }

        hba->timer_work_queue =
                create_singlethread_workqueue("bnx2fc_timer_wq");
        if (!hba->timer_work_queue) {
                printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
                rc = -EINVAL;
                goto ifput_err;
        }

        lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
        if (!lport) {
                printk(KERN_ERR PFX "Failed to create interface (%s)\n",
                       netdev->name);
                bnx2fc_netdev_cleanup(hba);
                rc = -EINVAL;
                goto if_create_err;
        }

        lport->boot_time = jiffies;

        /* Make this master N_port */
        hba->ctlr.lp = lport;

        set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
        printk(KERN_ERR PFX "create: START DISC\n");
        bnx2fc_start_disc(hba);
        /*
         * Release from kref_init in bnx2fc_interface_setup, on success
         * lport should be holding a reference taken in bnx2fc_if_create
         */
        bnx2fc_interface_put(hba);
        /* put netdev that was held while calling dev_get_by_name */
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();
        return 0;

if_create_err:
        destroy_workqueue(hba->timer_work_queue);
ifput_err:
        bnx2fc_interface_put(hba);
netdev_err:
        module_put(THIS_MODULE);
mod_err:
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();

        return rc;
}

/**
 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
 *
 * @cnic: Pointer to cnic device instance
 *
 **/
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
{
        struct list_head *list;
        struct list_head *temp;
        struct bnx2fc_hba *hba;

        /* Called with bnx2fc_dev_lock held */
        list_for_each_safe(list, temp, &adapter_list) {
                hba = (struct bnx2fc_hba *)list;
                if (hba->cnic == cnic)
                        return hba;
        }
        return NULL;
}

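/*
 * bnx2fc_hba_lookup - map a physical net_device to its bnx2fc adapter
 *
 * Walks adapter_list under bnx2fc_dev_lock. Like bnx2fc_find_hba_for_cnic()
 * above, the cast from the list_head pointer relies on 'link' being the
 * first member of struct bnx2fc_hba.
 */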
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
{
        struct list_head *list;
        struct list_head *temp;
        struct bnx2fc_hba *hba;

        /* Called with bnx2fc_dev_lock held */
        list_for_each_safe(list, temp, &adapter_list) {
                hba = (struct bnx2fc_hba *)list;
                if (hba->phys_dev == phys_dev)
                        return hba;
        }
        printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
        return NULL;
}

/**
 * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
 *
 * @dev: cnic device handle
 */
static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
        struct bnx2fc_hba *hba;

        BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");

        if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
                printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
                       dev->netdev->name, dev->flags);
                return;
        }

        mutex_lock(&bnx2fc_dev_lock);
        hba = bnx2fc_find_hba_for_cnic(dev);
        if (!hba) {
                printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0x%p\n",
                       dev);
                mutex_unlock(&bnx2fc_dev_lock);
                return;
        }

        list_del_init(&hba->link);
        adapter_count--;

        if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
                /* destroy not called yet, move to quiesced list */
                bnx2fc_netdev_cleanup(hba);
                bnx2fc_if_destroy(hba->ctlr.lp);
        }
        mutex_unlock(&bnx2fc_dev_lock);

        bnx2fc_ulp_stop(hba);
        /* unregister cnic device */
        if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
                hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
        bnx2fc_interface_destroy(hba);
}

/**
 * bnx2fc_fcoe_reset - resets the FCoE lport
 *
 * @shost: Scsi_Host the reset was issued against
 *
 * Returns: always 0
 */
static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
{
        struct fc_lport *lport = shost_priv(shost);
        fc_lport_reset(lport);
        return 0;
}

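/*
 * bnx2fc_match - libfcoe transport 'match' callback
 *
 * Returns true when the netdev is a VLAN device whose real device belongs
 * to an hba already known to this driver, so that libfcoe routes
 * create/destroy requests for it to bnx2fc.
 */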
static bool bnx2fc_match(struct net_device *netdev)
{
        mutex_lock(&bnx2fc_dev_lock);
        if (netdev->priv_flags & IFF_802_1Q_VLAN) {
                struct net_device *phys_dev = vlan_dev_real_dev(netdev);

                if (bnx2fc_hba_lookup(phys_dev)) {
                        mutex_unlock(&bnx2fc_dev_lock);
                        return true;
                }
        }
        mutex_unlock(&bnx2fc_dev_lock);
        return false;
}

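/*
 * bnx2fc_transport - fcoe transport template registered with libfcoe in
 * bnx2fc_mod_init(); libfcoe dispatches create/destroy/enable/disable
 * requests to these handlers whenever bnx2fc_match() accepts the netdev.
 */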
static struct fcoe_transport bnx2fc_transport = {
        .name = {"bnx2fc"},
        .attached = false,
        .list = LIST_HEAD_INIT(bnx2fc_transport.list),
        .match = bnx2fc_match,
        .create = bnx2fc_create,
        .destroy = bnx2fc_destroy,
        .enable = bnx2fc_enable,
        .disable = bnx2fc_disable,
};

/**
 * bnx2fc_percpu_thread_create - Create a receive thread for an
 *                               online CPU
 *
 * @cpu: cpu index for the online cpu
 */
static void bnx2fc_percpu_thread_create(unsigned int cpu)
{
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;

        p = &per_cpu(bnx2fc_percpu, cpu);

        thread = kthread_create(bnx2fc_percpu_io_thread,
                                (void *)p,
                                "bnx2fc_thread/%d", cpu);
        /* bind thread to the cpu only if it was created successfully */
        if (likely(!IS_ERR(thread))) {
                kthread_bind(thread, cpu);
                p->iothread = thread;
                wake_up_process(thread);
        }
}

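/*
 * bnx2fc_percpu_thread_destroy - stop the per-CPU receive thread
 *
 * Detaches the io thread under fp_work_lock so no new work is queued to
 * this CPU, drains any work still pending on the per-CPU list, and then
 * stops the thread.
 */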
static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
{
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
        struct bnx2fc_work *work, *tmp;
        LIST_HEAD(work_list);

        BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);

        /* Prevent any new work from being queued for this CPU */
        p = &per_cpu(bnx2fc_percpu, cpu);
        spin_lock_bh(&p->fp_work_lock);
        thread = p->iothread;
        p->iothread = NULL;

        /* Pull the pending work off the per-CPU list... */
        list_splice_init(&p->work_list, &work_list);
        spin_unlock_bh(&p->fp_work_lock);

        /* ...and complete and free it outside the lock */
        list_for_each_entry_safe(work, tmp, &work_list, list) {
                list_del_init(&work->list);
                bnx2fc_process_cq_compl(work->tgt, work->wqe);
                kfree(work);
        }

        if (thread)
                kthread_stop(thread);
}

/**
 * bnx2fc_cpu_callback - Handler for CPU hotplug events
 *
 * @nfb:    The callback data block
 * @action: The event triggering the callback
 * @hcpu:   The index of the CPU that the event is for
 *
 * This creates or destroys per-CPU data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        unsigned cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                printk(PFX "CPU %x online: Create Rx thread\n", cpu);
                bnx2fc_percpu_thread_create(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
                bnx2fc_percpu_thread_destroy(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

/**
 * bnx2fc_mod_init - module init entry point
 *
 * Initialize driver wide global data structures, and register
 * with cnic module
 **/
static int __init bnx2fc_mod_init(void)
{
        struct fcoe_percpu_s *bg;
        struct task_struct *l2_thread;
        int rc = 0;
        unsigned int cpu = 0;
        struct bnx2fc_percpu_s *p;

        printk(KERN_INFO PFX "%s", version);

        /* register as a fcoe transport */
        rc = fcoe_transport_attach(&bnx2fc_transport);
        if (rc) {
                printk(KERN_ERR "failed to register an fcoe transport, check "
                       "if libfcoe is loaded\n");
                goto out;
        }

        INIT_LIST_HEAD(&adapter_list);
        mutex_init(&bnx2fc_dev_lock);
        adapter_count = 0;

        /* Attach FC transport template */
        rc = bnx2fc_attach_transport();
        if (rc)
                goto detach_ft;

        bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
        if (!bnx2fc_wq) {
                rc = -ENOMEM;
                goto release_bt;
        }

        bg = &bnx2fc_global;
        skb_queue_head_init(&bg->fcoe_rx_list);
        l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
                                   (void *)bg,
                                   "bnx2fc_l2_thread");
        if (IS_ERR(l2_thread)) {
                rc = PTR_ERR(l2_thread);
                goto free_wq;
        }
        wake_up_process(l2_thread);
        spin_lock_bh(&bg->fcoe_rx_list.lock);
        bg->thread = l2_thread;
        spin_unlock_bh(&bg->fcoe_rx_list.lock);

        for_each_possible_cpu(cpu) {
                p = &per_cpu(bnx2fc_percpu, cpu);
                INIT_LIST_HEAD(&p->work_list);
                spin_lock_init(&p->fp_work_lock);
        }

        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_create(cpu);
        }

        /* Initialize per CPU interrupt thread */
        register_hotcpu_notifier(&bnx2fc_cpu_notifier);

        cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);

        return 0;

free_wq:
        destroy_workqueue(bnx2fc_wq);
release_bt:
        bnx2fc_release_transport();
detach_ft:
        fcoe_transport_detach(&bnx2fc_transport);
out:
        return rc;
}

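/*
 * bnx2fc_mod_exit - module unload entry point
 *
 * Detaches every adapter from cnic, stops the global L2 receive thread and
 * the per-CPU io threads, and unregisters from the FC and fcoe transports.
 */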
static void __exit bnx2fc_mod_exit(void)
{
        LIST_HEAD(to_be_deleted);
        struct bnx2fc_hba *hba, *next;
        struct fcoe_percpu_s *bg;
        struct task_struct *l2_thread;
        struct sk_buff *skb;
        unsigned int cpu = 0;

        /*
         * NOTE: Since cnic's register_driver routine takes rtnl_lock,
         * rtnl_lock has higher precedence than bnx2fc_dev_lock;
         * unregister_device() cannot be called with bnx2fc_dev_lock
         * held.
         */
        mutex_lock(&bnx2fc_dev_lock);
        list_splice(&adapter_list, &to_be_deleted);
        INIT_LIST_HEAD(&adapter_list);
        adapter_count = 0;
        mutex_unlock(&bnx2fc_dev_lock);

        /* Unregister with cnic */
        list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
                list_del_init(&hba->link);
                printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
                       hba, atomic_read(&hba->kref.refcount));
                bnx2fc_ulp_stop(hba);
                /* unregister cnic device */
                if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
                                       &hba->reg_with_cnic))
                        hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
                bnx2fc_interface_destroy(hba);
        }
        cnic_unregister_driver(CNIC_ULP_FCOE);

        /* Destroy global thread */
        bg = &bnx2fc_global;
        spin_lock_bh(&bg->fcoe_rx_list.lock);
        l2_thread = bg->thread;
        bg->thread = NULL;
        while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
                kfree_skb(skb);

        spin_unlock_bh(&bg->fcoe_rx_list.lock);

        if (l2_thread)
                kthread_stop(l2_thread);

        unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);

        /* Destroy per cpu threads */
        for_each_online_cpu(cpu) {
                bnx2fc_percpu_thread_destroy(cpu);
        }

        destroy_workqueue(bnx2fc_wq);
        /*
         * detach from scsi transport
         * must happen after all destroys are done
         */
        bnx2fc_release_transport();

        /* detach from fcoe transport */
        fcoe_transport_detach(&bnx2fc_transport);
}

module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);

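/*
 * Two fc_function_template instances are exposed to the FC transport
 * class: bnx2fc_transport_function for the physical N_Port (it supports
 * vport create/delete/disable and uses the driver's own statistics
 * callback), and bnx2fc_vport_xport_function for NPIV vports, which has
 * no vport operations and uses the generic fc_get_host_stats().
 */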
static struct fc_function_template bnx2fc_transport_function = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_active_fc4s = 1,
        .show_host_maxframe_size = 1,

        .show_host_port_id = 1,
        .show_host_supported_speeds = 1,
        .get_host_speed = fc_get_host_speed,
        .show_host_speed = 1,
        .show_host_port_type = 1,
        .get_host_port_state = fc_get_host_port_state,
        .show_host_port_state = 1,
        .show_host_symbolic_name = 1,

        .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
                            sizeof(struct bnx2fc_rport)),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,
        .show_host_fabric_name = 1,
        .show_starget_node_name = 1,
        .show_starget_port_name = 1,
        .show_starget_port_id = 1,
        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,
        .get_fc_host_stats = bnx2fc_get_host_stats,

        .issue_fc_host_lip = bnx2fc_fcoe_reset,

        .terminate_rport_io = fc_rport_terminate_io,

        .vport_create = bnx2fc_vport_create,
        .vport_delete = bnx2fc_vport_destroy,
        .vport_disable = bnx2fc_vport_disable,
};

static struct fc_function_template bnx2fc_vport_xport_function = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
        .show_host_supported_fc4s = 1,
        .show_host_active_fc4s = 1,
        .show_host_maxframe_size = 1,

        .show_host_port_id = 1,
        .show_host_supported_speeds = 1,
        .get_host_speed = fc_get_host_speed,
        .show_host_speed = 1,
        .show_host_port_type = 1,
        .get_host_port_state = fc_get_host_port_state,
        .show_host_port_state = 1,
        .show_host_symbolic_name = 1,

        .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
                            sizeof(struct bnx2fc_rport)),
        .show_rport_maxframe_size = 1,
        .show_rport_supported_classes = 1,
        .show_host_fabric_name = 1,
        .show_starget_node_name = 1,
        .show_starget_port_name = 1,
        .show_starget_port_id = 1,
        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
        .show_rport_dev_loss_tmo = 1,
        .get_fc_host_stats = fc_get_host_stats,
        .issue_fc_host_lip = bnx2fc_fcoe_reset,
        .terminate_rport_io = fc_rport_terminate_io,
};

/*
 * scsi_host_template structure used while registering with SCSI-ml
 */
static struct scsi_host_template bnx2fc_shost_template = {
        .module = THIS_MODULE,
        .name = "Broadcom Offload FCoE Initiator",
        .queuecommand = bnx2fc_queuecommand,
        .eh_abort_handler = bnx2fc_eh_abort,              /* abts */
        .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
        .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
        .eh_host_reset_handler = fc_eh_host_reset,
        .slave_alloc = fc_slave_alloc,
        .change_queue_depth = fc_change_queue_depth,
        .change_queue_type = fc_change_queue_type,
        .this_id = -1,
        .cmd_per_lun = 3,
        .can_queue = BNX2FC_CAN_QUEUE,
        .use_clustering = ENABLE_CLUSTERING,
        .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
        .max_sectors = 512,
};

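/*
 * libfc function template: hands frame transmit, ELS/CT transmit and FCP
 * abort/cleanup over to the bnx2fc offload paths and hooks rport events;
 * entries not listed here fall back to the libfc defaults.
 */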
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
        .frame_send = bnx2fc_xmit,
        .elsct_send = bnx2fc_elsct_send,
        .fcp_abort_io = bnx2fc_abort_io,
        .fcp_cleanup = bnx2fc_cleanup,
        .rport_event_callback = bnx2fc_rport_event_handler,
};

/**
 * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
 *                  structure carrying callback function pointers
 */
static struct cnic_ulp_ops bnx2fc_cnic_cb = {
        .owner = THIS_MODULE,
        .cnic_init = bnx2fc_ulp_init,
        .cnic_exit = bnx2fc_ulp_exit,
        .cnic_start = bnx2fc_ulp_start,
        .cnic_stop = bnx2fc_ulp_stop,
        .indicate_kcqes = bnx2fc_indicate_kcqe,
        .indicate_netevent = bnx2fc_indicate_netevent,
};