fcoe.c
/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>

#include <scsi/fc/fc_encaps.h>

#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>
#include "fcoe.h"

static int debug_fcoe;

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* fcoe host list */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);

/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *shost);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *arg);
static void fcoe_clean_pending_queue(struct fc_lport *lp);
static void fcoe_percpu_clean(struct fc_lport *lp);
static int fcoe_link_ok(struct fc_lport *lp);

static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
static int fcoe_hostlist_remove(const struct fc_lport *);

static struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

static struct scsi_transport_template *scsi_transport_fcoe_sw;

struct fc_function_template fcoe_transport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,
};

static struct scsi_host_template fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = "FCoE Driver",
	.proc_name = FCOE_NAME,
	.queuecommand = fc_queuecommand,
	.eh_abort_handler = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 32,
	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
};

/**
 * fcoe_lport_config() - sets up the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Returns: 0 for success
 */
static int fcoe_lport_config(struct fc_lport *lp)
{
	lp->link_up = 0;
	lp->qfull = 0;
	lp->max_retry_count = 3;
	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
	lp->r_a_tov = 2 * 2 * 1000;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);

	fc_lport_init_stats(lp);

	/* lport fc_lport related configuration */
	fc_lport_config(lp);

	/* offload related configuration */
	lp->crc_offload = 0;
	lp->seq_offload = 0;
	lp->lro_enabled = 0;
	lp->lro_xid = 0;
	lp->lso_max = 0;

	return 0;
}

/**
 * fcoe_netdev_config() - Set up netdev for SW FCoE
 * @lp : ptr to the fc_lport
 * @netdev : ptr to the associated netdevice struct
 *
 * Must be called after fcoe_lport_config() as it will use lport mutex
 *
 * Returns : 0 for success
 */
static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
{
	u32 mfs;
	u64 wwnn, wwpn;
	struct fcoe_softc *fc;
	u8 flogi_maddr[ETH_ALEN];

	/* Setup lport private data to point to fcoe softc */
	fc = lport_priv(lp);
	fc->lp = lp;
	fc->real_dev = netdev;
	fc->phys_dev = netdev;

	/* Require support for get_pauseparam ethtool op. */
	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		fc->phys_dev = vlan_dev_real_dev(netdev);

	/* Bonded devices are not supported */
	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
		return -EOPNOTSUPP;
	}

	/*
	 * Determine max frame size based on underlying device and optional
	 * user-configured limit. If the MFS is too low, fcoe_link_ok()
	 * will return 0, so do this first.
	 */
	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
				   sizeof(struct fcoe_crc_eof));
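	/*
	 * Example (with the 14-byte FCoE header and 8-byte CRC/EOF trailer
	 * as defined in fc_fcoe.h on this kernel): for the common FCoE
	 * "baby jumbo" MTU of 2500 bytes, mfs = 2500 - (14 + 8) = 2478,
	 * which covers a full 2112-byte FC payload plus the 24-byte FC
	 * header.
	 */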
	if (fc_set_mfs(lp, mfs))
		return -EINVAL;

	if (!fcoe_link_ok(lp))
		lp->link_up = 1;

	/* offload features support */
	if (fc->real_dev->features & NETIF_F_SG)
		lp->sg_supp = 1;

#ifdef NETIF_F_FCOE_CRC
	if (netdev->features & NETIF_F_FCOE_CRC) {
		lp->crc_offload = 1;
		printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
		       netdev->name);
	}
#endif
#ifdef NETIF_F_FSO
	if (netdev->features & NETIF_F_FSO) {
		lp->seq_offload = 1;
		lp->lso_max = netdev->gso_max_size;
		printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
		       netdev->name, lp->lso_max);
	}
#endif
	if (netdev->fcoe_ddp_xid) {
		lp->lro_enabled = 1;
		lp->lro_xid = netdev->fcoe_ddp_xid;
		printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
		       netdev->name, lp->lro_xid);
	}

	skb_queue_head_init(&fc->fcoe_pending_queue);
	fc->fcoe_pending_queue_active = 0;

	/* setup Source Mac Address */
	memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
	       fc->real_dev->addr_len);

	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
	fc_set_wwnn(lp, wwnn);
	/* XXX - 3rd arg needs to be vlan id */
	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
	fc_set_wwpn(lp, wwpn);
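	/*
	 * The WWNs set above are derived from the MAC by fcoe_wwn_from_mac():
	 * NAA scheme 1 for the node name, scheme 2 for the port name.
	 * Assuming the libfcoe encoding (scheme in the top nibble, MAC in
	 * the low 48 bits), a MAC of 00:1b:21:01:02:03 yields WWNN
	 * 10:00:00:1b:21:01:02:03 and WWPN 20:00:00:1b:21:01:02:03.
	 */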

	/*
	 * Add FCoE MAC address as second unicast MAC address
	 * or enter promiscuous mode if not capable of listening
	 * for multiple unicast MACs.
	 */
	rtnl_lock();
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
	rtnl_unlock();

	/*
	 * setup the receive function from ethernet driver
	 * on the ethertype for the given device
	 */
	fc->fcoe_packet_type.func = fcoe_rcv;
	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
	fc->fcoe_packet_type.dev = fc->real_dev;
	dev_add_pack(&fc->fcoe_packet_type);

	return 0;
}

/**
 * fcoe_shost_config() - Sets up fc_lport->host
 * @lp : ptr to the fc_lport
 * @shost : ptr to the associated scsi host
 * @dev : device associated to scsi host
 *
 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
 *
 * Returns : 0 for success
 */
static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
			     struct device *dev)
{
	int rc = 0;

	/* lport scsi host config */
	lp->host = shost;

	lp->host->max_lun = FCOE_MAX_LUN;
	lp->host->max_id = FCOE_MAX_FCP_TARGET;
	lp->host->max_channel = 0;
	lp->host->transportt = scsi_transport_fcoe_sw;

	/* add the new host to the SCSI-ml */
	rc = scsi_add_host(lp->host, dev);
	if (rc) {
		FC_DBG("fcoe_shost_config: error on scsi_add_host\n");
		return rc;
	}
	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
		FCOE_NAME, FCOE_VERSION,
		fcoe_netdev(lp)->name);

	return 0;
}

/**
 * fcoe_em_config() - allocates em for this lport
 * @lp: the port that the em is to be allocated for
 *
 * Returns : 0 on success
 */
static inline int fcoe_em_config(struct fc_lport *lp)
{
	BUG_ON(lp->emp);

	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
				    FCOE_MIN_XID, FCOE_MAX_XID);
	if (!lp->emp)
		return -ENOMEM;

	return 0;
}

/**
 * fcoe_if_destroy() - FCoE software HBA tear-down function
 * @netdev: ptr to the associated net_device
 *
 * Returns: 0 on success, -ENODEV if no FCoE instance exists on the netdev
 */
static int fcoe_if_destroy(struct net_device *netdev)
{
	struct fc_lport *lp = NULL;
	struct fcoe_softc *fc;
	u8 flogi_maddr[ETH_ALEN];

	BUG_ON(!netdev);

	printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
	       netdev->name);

	lp = fcoe_hostlist_lookup(netdev);
	if (!lp)
		return -ENODEV;

	fc = lport_priv(lp);

	/* Logout of the fabric */
	fc_fabric_logoff(lp);

	/* Remove the instance from fcoe's list */
	fcoe_hostlist_remove(lp);

	/* Don't listen for Ethernet packets anymore */
	dev_remove_pack(&fc->fcoe_packet_type);

	/* Cleanup the fc_lport */
	fc_lport_destroy(lp);
	fc_fcp_destroy(lp);

	/* Detach from the scsi-ml */
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);

	/* There are no more rports or I/O, free the EM */
	if (lp->emp)
		fc_exch_mgr_free(lp->emp);

	/* Delete secondary MAC addresses */
	rtnl_lock();
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
	if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
		dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
	rtnl_unlock();

	/* Remove this lport's skbs from the per-CPU receive queues */
	fcoe_percpu_clean(lp);

	/* Free existing skbs */
	fcoe_clean_pending_queue(lp);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(lp);

	/* Release the net_device and Scsi_Host */
	dev_put(fc->real_dev);
	scsi_host_put(lp->host);

	return 0;
}

/*
 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
 * @lp: the corresponding fc_lport
 * @xid: the exchange id for this ddp transfer
 * @sgl: the scatterlist describing this transfer
 * @sgc: number of sg items
 *
 * Returns : 0 if no DDP context was set up
 */
static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *n = fcoe_netdev(lp);

	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
		return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);

	return 0;
}

/*
 * fcoe_ddp_done - calls LLD's ddp_done through net_device
 * @lp: the corresponding fc_lport
 * @xid: the exchange id for this ddp transfer
 *
 * Returns : the length of data that has been completed by ddp
 */
static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
{
	struct net_device *n = fcoe_netdev(lp);

	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
		return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
	return 0;
}

static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.frame_send = fcoe_xmit,
	.ddp_setup = fcoe_ddp_setup,
	.ddp_done = fcoe_ddp_done,
};

/**
 * fcoe_if_create() - this function creates the fcoe interface
 * @netdev: pointer to the associated netdevice
 *
 * Creates fc_lport struct and scsi_host for lport, configures lport
 * and starts fabric login.
 *
 * Returns : 0 on success
 */
static int fcoe_if_create(struct net_device *netdev)
{
	int rc;
	struct fc_lport *lp = NULL;
	struct fcoe_softc *fc;
	struct Scsi_Host *shost;

	BUG_ON(!netdev);

	printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
	       netdev->name);

	lp = fcoe_hostlist_lookup(netdev);
	if (lp)
		return -EEXIST;

	shost = fcoe_host_alloc(&fcoe_shost_template,
				sizeof(struct fcoe_softc));
	if (!shost) {
		FC_DBG("Could not allocate host structure\n");
		return -ENOMEM;
	}
	lp = shost_priv(shost);
	fc = lport_priv(lp);

	/* configure fc_lport, e.g., em */
	rc = fcoe_lport_config(lp);
	if (rc) {
		FC_DBG("Could not configure lport\n");
		goto out_host_put;
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lp, netdev);
	if (rc) {
		FC_DBG("Could not configure netdev for lport\n");
		goto out_host_put;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lp, shost, &netdev->dev);
	if (rc) {
		FC_DBG("Could not configure shost for lport\n");
		goto out_host_put;
	}

	/* lport exch manager allocation */
	rc = fcoe_em_config(lp);
	if (rc) {
		FC_DBG("Could not configure em for lport\n");
		goto out_host_put;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
	if (rc) {
		FC_DBG("Could not configure libfc for lport!\n");
		goto out_lp_destroy;
	}

	/* add to lports list */
	fcoe_hostlist_add(lp);

	lp->boot_time = jiffies;

	fc_fabric_login(lp);

	dev_hold(netdev);

	return rc;

out_lp_destroy:
	fc_exch_mgr_free(lp->emp);	/* Free the EM */
out_host_put:
	scsi_host_put(lp->host);
	return rc;
}

/**
 * fcoe_if_init() - attach to scsi transport
 *
 * Returns : 0 on success
 */
static int __init fcoe_if_init(void)
{
	/* attach to scsi transport */
	scsi_transport_fcoe_sw =
		fc_attach_transport(&fcoe_transport_function);

	if (!scsi_transport_fcoe_sw) {
		printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * fcoe_if_exit() - detach from scsi transport
 *
 * Returns : 0 on success
 */
int __exit fcoe_if_exit(void)
{
	fc_release_transport(scsi_transport_fcoe_sw);

	return 0;
}

/**
 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
 * @cpu: cpu index for the online cpu
 */
static void fcoe_percpu_thread_create(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;

	p = &per_cpu(fcoe_percpu, cpu);

	thread = kthread_create(fcoe_percpu_receive_thread,
				(void *)p, "fcoethread/%d", cpu);

	/* check the newly created thread, not p->thread (still NULL here) */
	if (likely(!IS_ERR(thread))) {
		kthread_bind(thread, cpu);
		wake_up_process(thread);
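		/*
		 * Publish the thread pointer under the rx-list lock so that
		 * fcoe_rcv(), which checks p->thread under the same lock,
		 * never queues an skb to a thread that is not yet running.
		 */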
		spin_lock_bh(&p->fcoe_rx_list.lock);
		p->thread = thread;
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
}

/**
 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
 * @cpu: cpu index of the rx thread to be removed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */

	printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FC_DBG("Moving frames from CPU %d to CPU %d\n",
			       cpu, targ_cpu);

			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot
			 * accept new skbs. Unlock the targeted CPU and drop
			 * the skbs on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
#else
	/*
	 * This is a non-SMP scenario where the singular Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	if (thread)
		kthread_stop(thread);

	if (crc_eof)
		put_page(crc_eof);
}

/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * This creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		FC_DBG("CPU %x online: Create Rx thread\n", cpu);
		fcoe_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
		fcoe_percpu_thread_destroy(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};

/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * This function receives the packet, builds an fc frame and passes it up
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	unsigned short oxid;
	unsigned int cpu = 0;

	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");
	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;

#ifdef CONFIG_SMP
	/*
	 * The incoming frame exchange id(oxid) is ANDed with num of online
	 * cpu bits to get cpu and then this cpu is used for selecting
	 * a per cpu kernel thread from fcoe_percpu.
	 */
	cpu = oxid & (num_online_cpus() - 1);
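	/*
	 * Example: with 4 online CPUs the low two bits of the OXID pick the
	 * CPU, so OXID 0x1235 maps to CPU 1. All frames of a given exchange
	 * therefore land on the same Rx thread, preserving per-exchange
	 * ordering. (The mask only spreads work evenly when the online CPU
	 * count is a power of two.)
	 */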
#endif
	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now. For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FC_DBG("CPU is online, but no receive thread ready "
		       "for incoming skb- using first online CPU.\n");
		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		cpu = first_cpu(cpu_online_map);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock_bh(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
	 * so we're free to queue skbs into its queue.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
	fc_lport_get_stats(lp)->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);

/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int rc;
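	/*
	 * dev_queue_xmit() consumes its reference to the skb even when it
	 * fails, so take an extra reference first. On error the caller
	 * still owns the skb and can requeue it; on success the extra
	 * reference is dropped below.
	 */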
	skb_get(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}

/**
 * fcoe_get_paged_crc_eof() - allocate a page for the FCoE CRC/EOF trailer if needed
 * @skb: the skb to be xmitted
 * @tlen: total length of the trailer
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;
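	/*
	 * Each CPU keeps one page that the CRC/EOF trailers of successive
	 * frames are packed into. get_page() below takes a per-frame
	 * reference; once the offset reaches PAGE_SIZE the per-CPU pointer
	 * is dropped so the next caller allocates a fresh page, and the old
	 * page is freed when the last frame referencing it is released.
	 */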
	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu_var(fcoe_percpu);
	return 0;
}

/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() to calculate the crc for fc frame
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}

/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);
	/*
	 * if it is a flogi then we need to learn gw-addr
	 * and my own fcid
	 */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving
			 * the FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
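	/*
	 * With CRC offload the NIC computes the FC CRC for us: csum_start
	 * points at the start of the FC frame and csum_offset at the spot
	 * (end of the current data) where the CRC will be written. Without
	 * offload the CRC is computed in software by fcoe_fc_crc().
	 */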
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

#ifdef NETIF_F_FSO
	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lp->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
#endif
	/* update tx stats: regardless if LLD fails */
	stats = fc_lport_get_stats(lp);
	stats->TxFrames++;
	stats->TxWords += wlen;

	/* send down to lld */
	fr_dev(fp) = lp;
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	if (rc) {
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);

/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_softc *fc;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			FC_DBG("invalid HBA Structure");
			kfree_skb(skb);
			continue;
		}

		if (unlikely(debug_fcoe)) {
			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
			       "tail:%p end:%p sum:%d dev:%s",
			       skb->len, skb->data_len,
			       skb->head, skb->data, skb_tail_pointer(skb),
			       skb_end_pointer(skb), skb->csum,
			       skb->dev ? skb->dev->name : "<NULL>");
		}

		/*
		 * Save source MAC address before discarding header.
		 */
		fc = lport_priv(lp);
		if (unlikely(fc->flogi_progress))
			mac = eth_hdr(skb)->h_source;

		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		stats = fc_lport_get_stats(lp);
		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			if (stats->ErrorFrames < 5)
				printk(KERN_WARNING "FCoE version "
				       "mismatch: The frame has "
				       "version %x, but the "
				       "initiator supports version "
				       "%x\n", FC_FCOE_DECAPS_VER(hp),
				       FC_FCOE_VER);
			stats->ErrorFrames++;
			kfree_skb(skb);
			continue;
		}

		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
		stats->RxFrames++;
		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * Only check the CRC here if the hardware did not already
		 * validate it. Solicited FCP data is handed off first; the
		 * FCP layer checks its CRC during the copy.
		 */
		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		fh = fc_frame_header_get(fp);
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, lp->emp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (debug_fcoe || stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		/* non flogi and non data exchanges are handled here */
		if (unlikely(fc->flogi_progress))
			fcoe_recv_flogi(fc, fp, mac);
		fc_exch_recv(lp, lp->emp, fp);
	}
	return 0;
}

/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This parses the FLOGI response and sets the corresponding MAC address
 * for the initiator, either OUI-based or GW-based.
 *
 * Returns: none
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}

/**
 * fcoe_watchdog() - fcoe timer callback
 * @vp: unused
 *
 * This checks the pending queue length for fcoe and sets lport qfull
 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
 * fcoe_hostlist.
 *
 * Returns: none
 */
void fcoe_watchdog(ulong vp)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->lp)
			fcoe_check_wait_queue(fc->lp);
	}
	read_unlock(&fcoe_hostlist_lock);

	fcoe_timer.expires = jiffies + (1 * HZ);
	add_timer(&fcoe_timer);
}

/**
 * fcoe_check_wait_queue() - try to flush the pending xmit queue
 * @lp: the fc_lport whose pending queue should be flushed
 *
 * This dequeues each skb on the pending queue and hands it to
 * fcoe_start_io(). If a transmit fails, the skb is put back at the head
 * of the queue so it can be retried later, either by the timer
 * (fcoe_watchdog) or by the next call to fcoe_xmit().
 *
 * The pending queue holds skbs whose earlier transmit attempt failed.
 *
 * Returns: the remaining pending queue length, or -1 if another flush
 * is already in progress.
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;
	int rc = -1;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	if (fc->fcoe_pending_queue_active)
		goto out;
	fc->fcoe_pending_queue_active = 1;
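
	/*
	 * The temporary qlen increment below keeps the queue length
	 * non-zero while the lock is dropped around fcoe_start_io(), so
	 * concurrent fcoe_xmit() callers keep appending to the pending
	 * queue instead of transmitting directly and reordering frames.
	 */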
	while (fc->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		fc->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&fc->fcoe_pending_queue);

		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);

		if (rc) {
			__skb_queue_head(&fc->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			fc->fcoe_pending_queue.qlen--;
			break;
		}
		/* undo temporary increment above */
		fc->fcoe_pending_queue.qlen--;
	}

	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	fc->fcoe_pending_queue_active = 0;
	rc = fc->fcoe_pending_queue.qlen;
out:
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return rc;
}

/**
 * fcoe_dev_setup() - setup link change notification interface
 */
static void fcoe_dev_setup(void)
{
	/*
	 * Register a netdevice notifier so we are told about link
	 * state changes on the underlying interfaces.
	 */
	register_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: the net_device the event applies to
 *
 * This function is called by the ethernet driver in case of link change event
 *
 * Returns: NOTIFY_OK or NOTIFY_DONE
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = fc_lport_get_stats(lp);
			stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}

/**
 * fcoe_if_to_netdev() - parse a name buffer to get netdev
 * @buffer: incoming buffer containing the interface name
 *
 * Returns: NULL or ptr to netdev
 */
static struct net_device *fcoe_if_to_netdev(const char *buffer)
{
	char *cp;
	char ifname[IFNAMSIZ + 2];

	if (buffer) {
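		/*
		 * The buffer comes from a sysfs/module-parameter write and
		 * may end in a newline: copy at most IFNAMSIZ - 1 characters
		 * and strip any trailing '\n' before the lookup.
		 */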
		strlcpy(ifname, buffer, IFNAMSIZ);
		cp = ifname + strlen(ifname);
		while (--cp >= ifname && *cp == '\n')
			*cp = '\0';
		return dev_get_by_name(&init_net, ifname);
	}
	return NULL;
}

/**
 * fcoe_netdev_to_module_owner() - finds out the NIC driver module of the netdev
 * @netdev: the target netdev
 *
 * Returns: ptr to the struct module, NULL for failure
 */
static struct module *
fcoe_netdev_to_module_owner(const struct net_device *netdev)
{
	struct device *dev;

	if (!netdev)
		return NULL;

	dev = netdev->dev.parent;
	if (!dev)
		return NULL;

	if (!dev->driver)
		return NULL;

	return dev->driver->owner;
}

/**
 * fcoe_ethdrv_get() - Hold the Ethernet driver
 * @netdev: the target netdev
 *
 * Holds the Ethernet driver module by try_module_get() for
 * the corresponding netdev.
 *
 * Returns: the result of try_module_get(), or -ENODEV if no owning
 * module was found
 */
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
		       module_name(owner), netdev->name);
		return try_module_get(owner);
	}
	return -ENODEV;
}

/**
 * fcoe_ethdrv_put() - Release the Ethernet driver
 * @netdev: the target netdev
 *
 * Releases the Ethernet driver module by module_put for
 * the corresponding netdev.
 *
 * Returns: 0 for success
 */
static int fcoe_ethdrv_put(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
		       module_name(owner), netdev->name);
		module_put(owner);
		return 0;
	}
	return -ENODEV;
}

/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an Ethernet interface name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (!fcoe_hostlist_lookup(netdev)) {
		rc = -ENODEV;
		goto out_putdev;
	}
	rc = fcoe_if_destroy(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
		       netdev->name);
		rc = -EIO;
		goto out_putdev;
	}
	fcoe_ethdrv_put(netdev);
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}

/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an Ethernet interface name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}
	fcoe_ethdrv_get(netdev);

	rc = fcoe_if_create(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
		       netdev->name);
		fcoe_ethdrv_put(netdev);
		rc = -EIO;
		goto out_putdev;
	}
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}

module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");

/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently Per-priority-pause is not determinable using
 * ethtool, so we shouldn't be restrictive until that problem is resolved.
 *
 * Returns: 0 if link is OK for use by FCoE.
 *
 */
int fcoe_link_ok(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct net_device *dev = fc->real_dev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
	int rc = 0;

	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
		dev = fc->phys_dev;
		if (dev->ethtool_ops->get_settings) {
			dev->ethtool_ops->get_settings(dev, &ecmd);
			lp->link_supported_speeds &=
				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
					      SUPPORTED_1000baseT_Full))
				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
			if (ecmd.supported & SUPPORTED_10000baseT_Full)
				lp->link_supported_speeds |=
					FC_PORTSPEED_10GBIT;
			if (ecmd.speed == SPEED_1000)
				lp->link_speed = FC_PORTSPEED_1GBIT;
			if (ecmd.speed == SPEED_10000)
				lp->link_speed = FC_PORTSPEED_10GBIT;
		}
	} else
		rc = -1;

	return rc;
}
  1463. EXPORT_SYMBOL_GPL(fcoe_link_ok);
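
/*
 * Illustrative sketch only (assumed caller, not taken from this file): since
 * fcoe_link_ok() returns 0 when the link is usable, a link-event handler
 * could propagate the state to libfc roughly like this:
 *
 *	if (!fcoe_link_ok(lport))
 *		fc_linkup(lport);
 *	else
 *		fc_linkdown(lport);
 */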

/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&pp->fcoe_rx_list.lock);
		list = &pp->fcoe_rx_list;
		head = list->next;
		for (skb = head; skb != (struct sk_buff *)list;
		     skb = next) {
			next = skb->next;
			fr = fcoe_dev_from_skb(skb);
			if (fr->fr_dev == lp) {
				__skb_unlink(skb, list);
				kfree_skb(skb);
			}
		}
		spin_unlock_bh(&pp->fcoe_rx_list.lock);
	}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);

/**
 * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
 * @lp: the corresponding fc_lport
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		/* release the queue lock while freeing the skb, then reacquire it */
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);

/**
 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Returns: ptr to Scsi_Host
 * TODO: to libfc?
 */
static inline struct Scsi_Host *
libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
}

/**
 * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fcoe_softc
 *
 * Returns: ptr to Scsi_Host
 */
struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
}
EXPORT_SYMBOL_GPL(fcoe_host_alloc);
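
/*
 * Layout note (illustrative, derived from the two allocators above): the
 * Scsi_Host private area ends up laid out as
 *
 *	struct fc_lport      <- shost_priv(shost)
 *	struct fcoe_softc    <- lport_priv(lport)
 *	caller private data  <- priv_size bytes requested by the caller
 *
 * which is why lport_priv() is used throughout this file to reach the
 * fcoe_softc from an fc_lport.
 */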

/**
 * fcoe_reset() - Resets the lport associated with the given Scsi_Host
 * @shost: the Scsi_Host the reset is issued on
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);

/**
 * fcoe_hostlist_lookup_softc() - Find the corresponding fcoe_softc by net device
 * @dev: ptr to the net_device to look up
 *
 * Returns: NULL or the located fcoe_softc
 */
static struct fcoe_softc *
fcoe_hostlist_lookup_softc(const struct net_device *dev)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == dev) {
			read_unlock(&fcoe_hostlist_lock);
			return fc;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	return NULL;
}

/**
 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
 * @netdev: ptr to net_device
 *
 * Returns: the fc_lport bound to @netdev, or NULL if none is found
 */
struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(netdev);

	return (fc) ? fc->lp : NULL;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);

/**
 * fcoe_hostlist_add() - Add an lport to the lports list
 * @lp: ptr to the fc_lport to be added
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_add(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	if (!fc) {
		fc = lport_priv(lp);
		write_lock_bh(&fcoe_hostlist_lock);
		list_add_tail(&fc->list, &fcoe_hostlist);
		write_unlock_bh(&fcoe_hostlist_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);

/**
 * fcoe_hostlist_remove() - Remove an lport from the lports list
 * @lp: ptr to the fc_lport to be removed
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_remove(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	BUG_ON(!fc);
	write_lock_bh(&fcoe_hostlist_lock);
	list_del(&fc->list);
	write_unlock_bh(&fcoe_hostlist_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);

/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Returns 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	unsigned int cpu;
	int rc = 0;
	struct fcoe_percpu_s *p;

	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

	for_each_possible_cpu(cpu) {
		p = &per_cpu(fcoe_percpu, cpu);
		skb_queue_head_init(&p->fcoe_rx_list);
	}

	/* Start a per-CPU receive thread on each already-online CPU */
	for_each_online_cpu(cpu)
		fcoe_percpu_thread_create(cpu);

	/* Track CPU hotplug so the per-CPU receive threads follow online CPUs */
	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
	if (rc)
		goto out_free;

	/* Setup link change notification */
	fcoe_dev_setup();

	setup_timer(&fcoe_timer, fcoe_watchdog, 0);
	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	fcoe_if_init();

	return 0;

out_free:
	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}

	return rc;
}
module_init(fcoe_init);

/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Returns: none
 */
static void __exit fcoe_exit(void)
{
	unsigned int cpu;
	struct fcoe_softc *fc, *tmp;

	fcoe_dev_cleanup();

	/* Stop the timer */
	del_timer_sync(&fcoe_timer);

	/* releases the associated fcoe hosts */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_if_destroy(fc->real_dev);

	unregister_hotcpu_notifier(&fcoe_cpu_notifier);

	for_each_online_cpu(cpu) {
		fcoe_percpu_thread_destroy(cpu);
	}

	/* detach from scsi transport */
	fcoe_if_exit();
}
module_exit(fcoe_exit);