fcoe.c

  1. /*
  2. * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. * Maintained at www.Open-FCoE.org
  18. */
  19. #include <linux/module.h>
  20. #include <linux/version.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/ethtool.h>
  25. #include <linux/if_ether.h>
  26. #include <linux/if_vlan.h>
  27. #include <linux/crc32.h>
  28. #include <linux/cpu.h>
  29. #include <linux/fs.h>
  30. #include <linux/sysfs.h>
  31. #include <linux/ctype.h>
  32. #include <scsi/scsi_tcq.h>
  33. #include <scsi/scsicam.h>
  34. #include <scsi/scsi_transport.h>
  35. #include <scsi/scsi_transport_fc.h>
  36. #include <net/rtnetlink.h>
  37. #include <scsi/fc/fc_encaps.h>
  38. #include <scsi/fc/fc_fip.h>
  39. #include <scsi/libfc.h>
  40. #include <scsi/fc_frame.h>
  41. #include <scsi/libfcoe.h>
  42. #include "fcoe.h"
  43. MODULE_AUTHOR("Open-FCoE.org");
  44. MODULE_DESCRIPTION("FCoE");
  45. MODULE_LICENSE("GPL v2");
  46. /* Performance tuning parameters for fcoe */
  47. static unsigned int fcoe_ddp_min;
  48. module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
  49. MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
  50. "Direct Data Placement (DDP).");
  51. /* fcoe host list */
  52. LIST_HEAD(fcoe_hostlist);
  53. DEFINE_RWLOCK(fcoe_hostlist_lock);
  54. DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
  55. /* Function Prototypes */
  56. static int fcoe_reset(struct Scsi_Host *shost);
  57. static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
  58. static int fcoe_rcv(struct sk_buff *, struct net_device *,
  59. struct packet_type *, struct net_device *);
  60. static int fcoe_percpu_receive_thread(void *arg);
  61. static void fcoe_clean_pending_queue(struct fc_lport *lp);
  62. static void fcoe_percpu_clean(struct fc_lport *lp);
  63. static int fcoe_link_ok(struct fc_lport *lp);
  64. static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
  65. static int fcoe_hostlist_add(const struct fc_lport *);
  66. static int fcoe_hostlist_remove(const struct fc_lport *);
  67. static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
  68. static int fcoe_device_notification(struct notifier_block *, ulong, void *);
  69. static void fcoe_dev_setup(void);
  70. static void fcoe_dev_cleanup(void);
  71. /* notification function from net device */
  72. static struct notifier_block fcoe_notifier = {
  73. .notifier_call = fcoe_device_notification,
  74. };
  75. static struct scsi_transport_template *scsi_transport_fcoe_sw;
  76. struct fc_function_template fcoe_transport_function = {
  77. .show_host_node_name = 1,
  78. .show_host_port_name = 1,
  79. .show_host_supported_classes = 1,
  80. .show_host_supported_fc4s = 1,
  81. .show_host_active_fc4s = 1,
  82. .show_host_maxframe_size = 1,
  83. .show_host_port_id = 1,
  84. .show_host_supported_speeds = 1,
  85. .get_host_speed = fc_get_host_speed,
  86. .show_host_speed = 1,
  87. .show_host_port_type = 1,
  88. .get_host_port_state = fc_get_host_port_state,
  89. .show_host_port_state = 1,
  90. .show_host_symbolic_name = 1,
  91. .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
  92. .show_rport_maxframe_size = 1,
  93. .show_rport_supported_classes = 1,
  94. .show_host_fabric_name = 1,
  95. .show_starget_node_name = 1,
  96. .show_starget_port_name = 1,
  97. .show_starget_port_id = 1,
  98. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  99. .show_rport_dev_loss_tmo = 1,
  100. .get_fc_host_stats = fc_get_host_stats,
  101. .issue_fc_host_lip = fcoe_reset,
  102. .terminate_rport_io = fc_rport_terminate_io,
  103. };
  104. static struct scsi_host_template fcoe_shost_template = {
  105. .module = THIS_MODULE,
  106. .name = "FCoE Driver",
  107. .proc_name = FCOE_NAME,
  108. .queuecommand = fc_queuecommand,
  109. .eh_abort_handler = fc_eh_abort,
  110. .eh_device_reset_handler = fc_eh_device_reset,
  111. .eh_host_reset_handler = fc_eh_host_reset,
  112. .slave_alloc = fc_slave_alloc,
  113. .change_queue_depth = fc_change_queue_depth,
  114. .change_queue_type = fc_change_queue_type,
  115. .this_id = -1,
  116. .cmd_per_lun = 32,
  117. .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
  118. .use_clustering = ENABLE_CLUSTERING,
  119. .sg_tablesize = SG_ALL,
  120. .max_sectors = 0xffff,
  121. };
  122. /**
  123. * fcoe_fip_recv - handle a received FIP frame.
  124. * @skb: the receive skb
  125. * @dev: associated &net_device
  126. * @ptype: the &packet_type structure which was used to register this handler.
  127. * @orig_dev: original receive &net_device, in case @dev is a bond.
  128. *
  129. * Returns: 0 for success
  130. */
  131. static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
  132. struct packet_type *ptype,
  133. struct net_device *orig_dev)
  134. {
  135. struct fcoe_port *port;
  136. port = container_of(ptype, struct fcoe_port, fip_packet_type);
  137. fcoe_ctlr_recv(&port->ctlr, skb);
  138. return 0;
  139. }
  140. /**
  141. * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
  142. * @fip: FCoE controller.
  143. * @skb: FIP Packet.
  144. */
  145. static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  146. {
  147. skb->dev = fcoe_from_ctlr(fip)->netdev;
  148. dev_queue_xmit(skb);
  149. }
  150. /**
  151. * fcoe_update_src_mac() - Update Ethernet MAC filters.
  152. * @fip: FCoE controller.
  153. * @old: Unicast MAC address to delete if the MAC is non-zero.
  154. * @new: Unicast MAC address to add.
  155. *
  156. * Remove any previously-set unicast MAC filter.
  157. * Add secondary FCoE MAC address filter for our OUI.
  158. */
  159. static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
  160. {
  161. struct fcoe_port *port;
  162. port = fcoe_from_ctlr(fip);
  163. rtnl_lock();
  164. if (!is_zero_ether_addr(old))
  165. dev_unicast_delete(port->netdev, old);
  166. dev_unicast_add(port->netdev, new);
  167. rtnl_unlock();
  168. }
  169. /**
  170. * fcoe_lport_config() - sets up the fc_lport
  171. * @lp: ptr to the fc_lport
  172. *
  173. * Returns: 0 for success
  174. */
  175. static int fcoe_lport_config(struct fc_lport *lp)
  176. {
  177. lp->link_up = 0;
  178. lp->qfull = 0;
  179. lp->max_retry_count = 3;
  180. lp->max_rport_retry_count = 3;
  181. lp->e_d_tov = 2 * 1000; /* FC-FS default */
  182. lp->r_a_tov = 2 * 2 * 1000;
  183. lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  184. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  185. fc_lport_init_stats(lp);
  186. /* lport fc_lport related configuration */
  187. fc_lport_config(lp);
  188. /* offload related configuration */
  189. lp->crc_offload = 0;
  190. lp->seq_offload = 0;
  191. lp->lro_enabled = 0;
  192. lp->lro_xid = 0;
  193. lp->lso_max = 0;
  194. return 0;
  195. }
  196. /**
  197. * fcoe_netdev_cleanup() - clean up netdev configurations
  198. * @port: ptr to the fcoe_port
  199. */
  200. void fcoe_netdev_cleanup(struct fcoe_port *port)
  201. {
  202. u8 flogi_maddr[ETH_ALEN];
  203. /* Don't listen for Ethernet packets anymore */
  204. dev_remove_pack(&port->fcoe_packet_type);
  205. dev_remove_pack(&port->fip_packet_type);
  206. /* Delete secondary MAC addresses */
  207. rtnl_lock();
  208. memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
  209. dev_unicast_delete(port->netdev, flogi_maddr);
  210. if (!is_zero_ether_addr(port->ctlr.data_src_addr))
  211. dev_unicast_delete(port->netdev, port->ctlr.data_src_addr);
  212. if (port->ctlr.spma)
  213. dev_unicast_delete(port->netdev, port->ctlr.ctl_src_addr);
  214. dev_mc_delete(port->netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
  215. rtnl_unlock();
  216. }
  217. /**
  218. * fcoe_queue_timer() - fcoe queue timer
  219. * @lp: the fc_lport pointer
  220. *
  221. * Calls fcoe_check_wait_queue on timeout
  222. *
  223. */
  224. static void fcoe_queue_timer(ulong lp)
  225. {
  226. fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
  227. }
  228. /**
  229. * fcoe_netdev_config() - Set up netdev for SW FCoE
  230. * @lp : ptr to the fc_lport
  231. * @netdev : ptr to the associated netdevice struct
  232. *
  233. * Must be called after fcoe_lport_config() as it will use lport mutex
  234. *
  235. * Returns : 0 for success
  236. */
  237. static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
  238. {
  239. u32 mfs;
  240. u64 wwnn, wwpn;
  241. struct fcoe_port *port;
  242. u8 flogi_maddr[ETH_ALEN];
  243. struct netdev_hw_addr *ha;
  244. /* Setup lport private data to point to fcoe softc */
  245. port = lport_priv(lp);
  246. port->ctlr.lp = lp;
  247. port->netdev = netdev;
  248. /* Bonding devices are not supported */
  249. if ((netdev->priv_flags & IFF_MASTER_ALB) ||
  250. (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
  251. (netdev->priv_flags & IFF_MASTER_8023AD)) {
  252. return -EOPNOTSUPP;
  253. }
  254. /*
  255. * Determine max frame size based on underlying device and optional
  256. * user-configured limit. If the MFS is too low, fcoe_link_ok()
  257. * will return 0, so do this first.
  258. */
  259. mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
  260. sizeof(struct fcoe_crc_eof));
  261. if (fc_set_mfs(lp, mfs))
  262. return -EINVAL;
  263. /* offload features support */
  264. if (netdev->features & NETIF_F_SG)
  265. lp->sg_supp = 1;
  266. if (netdev->features & NETIF_F_FCOE_CRC) {
  267. lp->crc_offload = 1;
  268. FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
  269. }
  270. if (netdev->features & NETIF_F_FSO) {
  271. lp->seq_offload = 1;
  272. lp->lso_max = netdev->gso_max_size;
  273. FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
  274. lp->lso_max);
  275. }
  276. if (netdev->fcoe_ddp_xid) {
  277. lp->lro_enabled = 1;
  278. lp->lro_xid = netdev->fcoe_ddp_xid;
  279. FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
  280. lp->lro_xid);
  281. }
  282. skb_queue_head_init(&port->fcoe_pending_queue);
  283. port->fcoe_pending_queue_active = 0;
  284. setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);
  285. /* look for SAN MAC address, if multiple SAN MACs exist, only
  286. * use the first one for SPMA */
  287. rcu_read_lock();
  288. for_each_dev_addr(netdev, ha) {
  289. if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
  290. (is_valid_ether_addr(ha->addr))) {
  291. memcpy(port->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
  292. port->ctlr.spma = 1;
  293. break;
  294. }
  295. }
  296. rcu_read_unlock();
  297. /* setup Source Mac Address */
  298. if (!port->ctlr.spma)
  299. memcpy(port->ctlr.ctl_src_addr, netdev->dev_addr,
  300. netdev->addr_len);
  301. wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
  302. fc_set_wwnn(lp, wwnn);
  303. /* XXX - 3rd arg needs to be vlan id */
  304. wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
  305. fc_set_wwpn(lp, wwpn);
  306. /*
  307. * Add FCoE MAC address as second unicast MAC address
  308. * or enter promiscuous mode if not capable of listening
  309. * for multiple unicast MACs.
  310. */
  311. rtnl_lock();
  312. memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
  313. dev_unicast_add(netdev, flogi_maddr);
  314. if (port->ctlr.spma)
  315. dev_unicast_add(netdev, port->ctlr.ctl_src_addr);
  316. dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
  317. rtnl_unlock();
  318. /*
  319. * setup the receive function from ethernet driver
  320. * on the ethertype for the given device
  321. */
  322. port->fcoe_packet_type.func = fcoe_rcv;
  323. port->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
  324. port->fcoe_packet_type.dev = netdev;
  325. dev_add_pack(&port->fcoe_packet_type);
  326. port->fip_packet_type.func = fcoe_fip_recv;
  327. port->fip_packet_type.type = htons(ETH_P_FIP);
  328. port->fip_packet_type.dev = netdev;
  329. dev_add_pack(&port->fip_packet_type);
  330. return 0;
  331. }
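/*
 * Editor's note (illustrative sketch, not part of the driver): the max frame
 * size (MFS) passed to fc_set_mfs() above is simply the netdev MTU minus the
 * FCoE encapsulation overhead.  Assuming the usual 14-byte struct fcoe_hdr
 * and 8-byte struct fcoe_crc_eof, the arithmetic is:
 */
static inline u32 fcoe_example_mfs(u32 mtu)
{
	/* e.g. a 2500-byte "baby jumbo" MTU gives 2500 - (14 + 8) = 2478,
	 * enough for a full FC frame of 2136 bytes (2112-byte payload
	 * plus the 24-byte FC header) */
	return mtu - (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
}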
  332. /**
  333. * fcoe_shost_config() - Sets up fc_lport->host
  334. * @lp : ptr to the fc_lport
  335. * @shost : ptr to the associated scsi host
  336. * @dev : device associated to scsi host
  337. *
  338. * Must be called after fcoe_lport_config() and fcoe_netdev_config()
  339. *
  340. * Returns : 0 for success
  341. */
  342. static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
  343. struct device *dev)
  344. {
  345. int rc = 0;
  346. /* lport scsi host config */
  347. lp->host = shost;
  348. lp->host->max_lun = FCOE_MAX_LUN;
  349. lp->host->max_id = FCOE_MAX_FCP_TARGET;
  350. lp->host->max_channel = 0;
  351. lp->host->transportt = scsi_transport_fcoe_sw;
  352. /* add the new host to the SCSI-ml */
  353. rc = scsi_add_host(lp->host, dev);
  354. if (rc) {
  355. FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
  356. "error on scsi_add_host\n");
  357. return rc;
  358. }
  359. sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
  360. FCOE_NAME, FCOE_VERSION,
  361. fcoe_netdev(lp)->name);
  362. return 0;
  363. }
  364. /*
  365. * fcoe_oem_match() - match frames for read-type I/O
  366. * @fp: the fc_frame for the new I/O
  367. *
  368. * Returns: true for read-type I/O above the DDP threshold, otherwise false.
  369. */
  370. bool fcoe_oem_match(struct fc_frame *fp)
  371. {
  372. return fc_fcp_is_read(fr_fsp(fp)) &&
  373. (fr_fsp(fp)->data_len > fcoe_ddp_min);
  374. }
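/*
 * Editor's note: fcoe_ddp_min is the tunable threshold used above; reads
 * smaller than ddp_min bytes skip DDP because the setup cost outweighs the
 * benefit.  As an illustration (the value is arbitrary), it could be set at
 * load time with "modprobe fcoe ddp_min=4096" or changed at runtime through
 * /sys/module/fcoe/parameters/ddp_min.
 */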
  375. /**
  376. * fcoe_em_config() - allocates the exchange manager(s) for this lport
  377. * @lp: the fc_lport that the EM is to be allocated for
  378. *
  379. * Called with write fcoe_hostlist_lock held.
  380. *
  381. * Returns : 0 on success
  382. */
  383. static inline int fcoe_em_config(struct fc_lport *lp)
  384. {
  385. struct fcoe_interface *fcoe;
  386. struct fcoe_port *port = lport_priv(lp);
  387. struct fcoe_port *oldfc = NULL;
  388. struct net_device *old_real_dev, *cur_real_dev;
  389. u16 min_xid = FCOE_MIN_XID;
  390. u16 max_xid = FCOE_MAX_XID;
  391. /*
  392. * Check if need to allocate an em instance for
  393. * offload exchange ids to be shared across all VN_PORTs/lport.
  394. */
  395. if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
  396. lp->lro_xid = 0;
  397. goto skip_oem;
  398. }
  399. /*
  400. * Reuse existing offload em instance in case
  401. * it is already allocated on real eth device
  402. */
  403. if (port->netdev->priv_flags & IFF_802_1Q_VLAN)
  404. cur_real_dev = vlan_dev_real_dev(port->netdev);
  405. else
  406. cur_real_dev = port->netdev;
  407. list_for_each_entry(fcoe, &fcoe_hostlist, list) {
  408. oldfc = fcoe->priv;
  409. if (oldfc->netdev->priv_flags & IFF_802_1Q_VLAN)
  410. old_real_dev = vlan_dev_real_dev(oldfc->netdev);
  411. else
  412. old_real_dev = oldfc->netdev;
  413. if (cur_real_dev == old_real_dev) {
  414. port->oem = oldfc->oem;
  415. break;
  416. }
  417. }
  418. if (port->oem) {
  419. if (!fc_exch_mgr_add(lp, port->oem, fcoe_oem_match)) {
  420. printk(KERN_ERR "fcoe_em_config: failed to add "
  421. "offload em:%p on interface:%s\n",
  422. port->oem, port->netdev->name);
  423. return -ENOMEM;
  424. }
  425. } else {
  426. port->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
  427. FCOE_MIN_XID, lp->lro_xid,
  428. fcoe_oem_match);
  429. if (!port->oem) {
  430. printk(KERN_ERR "fcoe_em_config: failed to allocate "
  431. "em for offload exches on interface:%s\n",
  432. port->netdev->name);
  433. return -ENOMEM;
  434. }
  435. }
  436. /*
  437. * Exclude offload EM xid range from next EM xid range.
  438. */
  439. min_xid += lp->lro_xid + 1;
  440. skip_oem:
  441. if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
  442. printk(KERN_ERR "fcoe_em_config: failed to "
  443. "allocate em on interface %s\n", port->netdev->name);
  444. return -ENOMEM;
  445. }
  446. return 0;
  447. }
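/*
 * Editor's note on the XID split above: when DDP offload is available, the
 * first (shared) exchange manager serves XIDs FCOE_MIN_XID..lro_xid and is
 * consulted only for large reads via fcoe_oem_match(); the second EM,
 * allocated after the skip_oem label, serves the remaining range up to
 * FCOE_MAX_XID (starting at FCOE_MIN_XID + lro_xid + 1 as computed above),
 * so offloaded and non-offloaded exchanges never share an XID.
 */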
  448. /**
  449. * fcoe_if_destroy() - FCoE software HBA tear-down function
  450. * @lport: fc_lport to destroy
  451. */
  452. static void fcoe_if_destroy(struct fc_lport *lport)
  453. {
  454. struct fcoe_port *port = lport_priv(lport);
  455. struct fcoe_interface *fcoe = port->fcoe;
  456. struct net_device *netdev = port->netdev;
  457. FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
  458. /* Logout of the fabric */
  459. fc_fabric_logoff(lport);
  460. /* Remove the instance from fcoe's list */
  461. fcoe_hostlist_remove(lport);
  462. /* clean up netdev configurations */
  463. fcoe_netdev_cleanup(port);
  464. /* tear-down the FCoE controller */
  465. fcoe_ctlr_destroy(&port->ctlr);
  466. /* Free queued packets for the per-CPU receive threads */
  467. fcoe_percpu_clean(lport);
  468. /* Cleanup the fc_lport */
  469. fc_lport_destroy(lport);
  470. fc_fcp_destroy(lport);
  471. /* Detach from the scsi-ml */
  472. fc_remove_host(lport->host);
  473. scsi_remove_host(lport->host);
  474. /* There are no more rports or I/O, free the EM */
  475. fc_exch_mgr_free(lport);
  476. /* Free existing skbs */
  477. fcoe_clean_pending_queue(lport);
  478. /* Stop the timer */
  479. del_timer_sync(&port->timer);
  480. /* Free memory used by statistical counters */
  481. fc_lport_free_stats(lport);
  482. /* Release the net_device and Scsi_Host */
  483. dev_put(netdev);
  484. scsi_host_put(lport->host);
  485. kfree(fcoe); /* TODO, should be refcounted */
  486. }
  487. /*
  488. * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
  489. * @lp: the corresponding fc_lport
  490. * @xid: the exchange id for this ddp transfer
  491. * @sgl: the scatterlist describing this transfer
  492. * @sgc: number of sg items
  493. *
  494. * Returns: 0 if no DDP was set up
  495. */
  496. static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
  497. struct scatterlist *sgl, unsigned int sgc)
  498. {
  499. struct net_device *n = fcoe_netdev(lp);
  500. if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
  501. return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
  502. return 0;
  503. }
  504. /*
  505. * fcoe_ddp_done - calls LLD's ddp_done through net_device
  506. * @lp: the corresponding fc_lport
  507. * @xid: the exchange id for this ddp transfer
  508. *
  509. * Returns: the length of data that has been completed by DDP
  510. */
  511. static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
  512. {
  513. struct net_device *n = fcoe_netdev(lp);
  514. if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
  515. return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
  516. return 0;
  517. }
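/*
 * Editor's note: the two wrappers above are how libfc reaches the NIC's
 * direct data placement hooks.  ddp_setup() hands the exchange ID and the
 * destination scatterlist to the underlying driver before a large read is
 * issued, so the hardware can place the returned data directly into the
 * final buffers; ddp_done() is called at I/O completion and reports how
 * many bytes were placed that way.
 */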
  518. static struct libfc_function_template fcoe_libfc_fcn_templ = {
  519. .frame_send = fcoe_xmit,
  520. .ddp_setup = fcoe_ddp_setup,
  521. .ddp_done = fcoe_ddp_done,
  522. };
  523. /**
  524. * fcoe_if_create() - this function creates the fcoe interface
  525. * @netdev: pointer the associated netdevice
  526. * @parent: device pointer to be the parent in sysfs for the SCSI host
  527. *
  528. * Creates fc_lport struct and scsi_host for lport, configures lport
  529. * and starts fabric login.
  530. *
  531. * Returns : The allocated fc_lport or an error pointer
  532. */
  533. static struct fc_lport *fcoe_if_create(struct net_device *netdev,
  534. struct device *parent)
  535. {
  536. int rc;
  537. struct fc_lport *lport = NULL;
  538. struct fcoe_port *port;
  539. struct fcoe_interface *fcoe;
  540. struct Scsi_Host *shost;
  541. FCOE_NETDEV_DBG(netdev, "Create Interface\n");
  542. fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
  543. if (!fcoe) {
  544. FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
  545. rc = -ENOMEM;
  546. goto out;
  547. }
  548. shost = libfc_host_alloc(&fcoe_shost_template,
  549. sizeof(struct fcoe_port));
  550. if (!shost) {
  551. FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
  552. rc = -ENOMEM;
  553. goto out_kfree_port;
  554. }
  555. lport = shost_priv(shost);
  556. port = lport_priv(lport);
  557. port->fcoe = fcoe;
  558. fcoe->priv = port;
  559. /* configure fc_lport, e.g., em */
  560. rc = fcoe_lport_config(lport);
  561. if (rc) {
  562. FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
  563. "interface\n");
  564. goto out_host_put;
  565. }
  566. /*
  567. * Initialize FIP.
  568. */
  569. fcoe_ctlr_init(&port->ctlr);
  570. port->ctlr.send = fcoe_fip_send;
  571. port->ctlr.update_mac = fcoe_update_src_mac;
  572. /* configure lport network properties */
  573. rc = fcoe_netdev_config(lport, netdev);
  574. if (rc) {
  575. FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
  576. "interface\n");
  577. goto out_netdev_cleanup;
  578. }
  579. /* configure lport scsi host properties */
  580. rc = fcoe_shost_config(lport, shost, parent);
  581. if (rc) {
  582. FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
  583. "interface\n");
  584. goto out_netdev_cleanup;
  585. }
  586. /* Initialize the library */
  587. rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
  588. if (rc) {
  589. FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
  590. "interface\n");
  591. goto out_lp_destroy;
  592. }
  593. /*
  594. * fcoe_em_alloc() and fcoe_hostlist_add() both
  595. * need to be atomic under fcoe_hostlist_lock
  596. * since fcoe_em_alloc() looks for an existing EM
  597. * instance on host list updated by fcoe_hostlist_add().
  598. */
  599. write_lock(&fcoe_hostlist_lock);
  600. /* lport exch manager allocation */
  601. rc = fcoe_em_config(lport);
  602. if (rc) {
  603. FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
  604. "interface\n");
  605. goto out_lp_destroy;
  606. }
  607. /* add to lports list */
  608. fcoe_hostlist_add(lport);
  609. write_unlock(&fcoe_hostlist_lock);
  610. lport->boot_time = jiffies;
  611. fc_fabric_login(lport);
  612. if (!fcoe_link_ok(lport))
  613. fcoe_ctlr_link_up(&port->ctlr);
  614. dev_hold(netdev);
  615. return lport;
  616. out_lp_destroy:
  617. fc_exch_mgr_free(lport);
  618. out_netdev_cleanup:
  619. fcoe_netdev_cleanup(port);
  620. out_host_put:
  621. scsi_host_put(lport->host);
  622. out_kfree_port:
  623. kfree(fcoe);
  624. out:
  625. return ERR_PTR(rc);
  626. }
  627. /**
  628. * fcoe_if_init() - attach to scsi transport
  629. *
  630. * Returns : 0 on success
  631. */
  632. static int __init fcoe_if_init(void)
  633. {
  634. /* attach to scsi transport */
  635. scsi_transport_fcoe_sw =
  636. fc_attach_transport(&fcoe_transport_function);
  637. if (!scsi_transport_fcoe_sw) {
  638. printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
  639. return -ENODEV;
  640. }
  641. return 0;
  642. }
  643. /**
  644. * fcoe_if_exit() - detach from scsi transport
  645. *
  646. * Returns : 0 on success
  647. */
  648. int __exit fcoe_if_exit(void)
  649. {
  650. fc_release_transport(scsi_transport_fcoe_sw);
  651. return 0;
  652. }
  653. /**
  654. * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
  655. * @cpu: cpu index for the online cpu
  656. */
  657. static void fcoe_percpu_thread_create(unsigned int cpu)
  658. {
  659. struct fcoe_percpu_s *p;
  660. struct task_struct *thread;
  661. p = &per_cpu(fcoe_percpu, cpu);
  662. thread = kthread_create(fcoe_percpu_receive_thread,
  663. (void *)p, "fcoethread/%d", cpu);
  664. if (likely(!IS_ERR(thread))) {
  665. kthread_bind(thread, cpu);
  666. wake_up_process(thread);
  667. spin_lock_bh(&p->fcoe_rx_list.lock);
  668. p->thread = thread;
  669. spin_unlock_bh(&p->fcoe_rx_list.lock);
  670. }
  671. }
  672. /**
  673. * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
  674. * @cpu: cpu index for which the rx thread is to be removed
  675. *
  676. * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
  677. * current CPU's Rx thread. If the thread being destroyed is bound to
  678. * the CPU processing this context the skbs will be freed.
  679. */
  680. static void fcoe_percpu_thread_destroy(unsigned int cpu)
  681. {
  682. struct fcoe_percpu_s *p;
  683. struct task_struct *thread;
  684. struct page *crc_eof;
  685. struct sk_buff *skb;
  686. #ifdef CONFIG_SMP
  687. struct fcoe_percpu_s *p0;
  688. unsigned targ_cpu = smp_processor_id();
  689. #endif /* CONFIG_SMP */
  690. FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
  691. /* Prevent any new skbs from being queued for this CPU. */
  692. p = &per_cpu(fcoe_percpu, cpu);
  693. spin_lock_bh(&p->fcoe_rx_list.lock);
  694. thread = p->thread;
  695. p->thread = NULL;
  696. crc_eof = p->crc_eof_page;
  697. p->crc_eof_page = NULL;
  698. p->crc_eof_offset = 0;
  699. spin_unlock_bh(&p->fcoe_rx_list.lock);
  700. #ifdef CONFIG_SMP
  701. /*
  702. * Don't bother moving the skb's if this context is running
  703. * on the same CPU that is having its thread destroyed. This
  704. * can easily happen when the module is removed.
  705. */
  706. if (cpu != targ_cpu) {
  707. p0 = &per_cpu(fcoe_percpu, targ_cpu);
  708. spin_lock_bh(&p0->fcoe_rx_list.lock);
  709. if (p0->thread) {
  710. FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
  711. cpu, targ_cpu);
  712. while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
  713. __skb_queue_tail(&p0->fcoe_rx_list, skb);
  714. spin_unlock_bh(&p0->fcoe_rx_list.lock);
  715. } else {
  716. /*
  717. * The targeted CPU is not initialized and cannot accept
  718. * new skbs. Unlock the targeted CPU and drop the skbs
  719. * on the CPU that is going offline.
  720. */
  721. while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
  722. kfree_skb(skb);
  723. spin_unlock_bh(&p0->fcoe_rx_list.lock);
  724. }
  725. } else {
  726. /*
  727. * This scenario occurs when the module is being removed
  728. * and all threads are being destroyed. skbs will continue
  729. * to be shifted from the CPU thread that is being removed
  730. * to the CPU thread associated with the CPU that is processing
  731. * the module removal. Once there is only one CPU Rx thread it
  732. * will reach this case and we will drop all skbs and later
  733. * stop the thread.
  734. */
  735. spin_lock_bh(&p->fcoe_rx_list.lock);
  736. while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
  737. kfree_skb(skb);
  738. spin_unlock_bh(&p->fcoe_rx_list.lock);
  739. }
  740. #else
  741. /*
  742. * This a non-SMP scenario where the singular Rx thread is
  743. * being removed. Free all skbs and stop the thread.
  744. */
  745. spin_lock_bh(&p->fcoe_rx_list.lock);
  746. while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
  747. kfree_skb(skb);
  748. spin_unlock_bh(&p->fcoe_rx_list.lock);
  749. #endif
  750. if (thread)
  751. kthread_stop(thread);
  752. if (crc_eof)
  753. put_page(crc_eof);
  754. }
  755. /**
  756. * fcoe_cpu_callback() - fcoe cpu hotplug event callback
  757. * @nfb: callback data block
  758. * @action: event triggering the callback
  759. * @hcpu: index for the cpu of this event
  760. *
  761. * This creates or destroys per cpu data for fcoe
  762. *
  763. * Returns NOTIFY_OK always.
  764. */
  765. static int fcoe_cpu_callback(struct notifier_block *nfb,
  766. unsigned long action, void *hcpu)
  767. {
  768. unsigned cpu = (unsigned long)hcpu;
  769. switch (action) {
  770. case CPU_ONLINE:
  771. case CPU_ONLINE_FROZEN:
  772. FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
  773. fcoe_percpu_thread_create(cpu);
  774. break;
  775. case CPU_DEAD:
  776. case CPU_DEAD_FROZEN:
  777. FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
  778. fcoe_percpu_thread_destroy(cpu);
  779. break;
  780. default:
  781. break;
  782. }
  783. return NOTIFY_OK;
  784. }
  785. static struct notifier_block fcoe_cpu_notifier = {
  786. .notifier_call = fcoe_cpu_callback,
  787. };
  788. /**
  789. * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
  790. * @skb: the receive skb
  791. * @dev: associated net device
  792. * @ptype: context
  793. * @olddev: last device
  794. *
  795. * This function receives the packet, builds an FC frame, and passes it up.
  796. *
  797. * Returns: 0 for success
  798. */
  799. int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
  800. struct packet_type *ptype, struct net_device *olddev)
  801. {
  802. struct fc_lport *lp;
  803. struct fcoe_rcv_info *fr;
  804. struct fcoe_port *port;
  805. struct fc_frame_header *fh;
  806. struct fcoe_percpu_s *fps;
  807. unsigned int cpu;
  808. port = container_of(ptype, struct fcoe_port, fcoe_packet_type);
  809. lp = port->ctlr.lp;
  810. if (unlikely(lp == NULL)) {
  811. FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
  812. goto err2;
  813. }
  814. if (!lp->link_up)
  815. goto err2;
  816. FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
  817. "data:%p tail:%p end:%p sum:%d dev:%s",
  818. skb->len, skb->data_len, skb->head, skb->data,
  819. skb_tail_pointer(skb), skb_end_pointer(skb),
  820. skb->csum, skb->dev ? skb->dev->name : "<NULL>");
  821. /* check for FCOE packet type */
  822. if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
  823. FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
  824. goto err;
  825. }
  826. /*
  827. * Check for minimum frame length, and make sure required FCoE
  828. * and FC headers are pulled into the linear data area.
  829. */
  830. if (unlikely((skb->len < FCOE_MIN_FRAME) ||
  831. !pskb_may_pull(skb, FCOE_HEADER_LEN)))
  832. goto err;
  833. skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
  834. fh = (struct fc_frame_header *) skb_transport_header(skb);
  835. fr = fcoe_dev_from_skb(skb);
  836. fr->fr_dev = lp;
  837. fr->ptype = ptype;
  838. /*
  839. * In case the incoming frame's exchange is originated from
  840. * the initiator, then received frame's exchange id is ANDed
  841. * with fc_cpu_mask bits to get the same cpu on which exchange
  842. * was originated, otherwise just use the current cpu.
  843. */
  844. if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
  845. cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
  846. else
  847. cpu = smp_processor_id();
  848. fps = &per_cpu(fcoe_percpu, cpu);
  849. spin_lock_bh(&fps->fcoe_rx_list.lock);
  850. if (unlikely(!fps->thread)) {
  851. /*
  852. * The targeted CPU is not ready, let's target
  853. * the first CPU now. For non-SMP systems this
  854. * will check the same CPU twice.
  855. */
  856. FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
  857. "ready for incoming skb- using first online "
  858. "CPU.\n");
  859. spin_unlock_bh(&fps->fcoe_rx_list.lock);
  860. cpu = first_cpu(cpu_online_map);
  861. fps = &per_cpu(fcoe_percpu, cpu);
  862. spin_lock_bh(&fps->fcoe_rx_list.lock);
  863. if (!fps->thread) {
  864. spin_unlock_bh(&fps->fcoe_rx_list.lock);
  865. goto err;
  866. }
  867. }
  868. /*
  869. * We now have a valid CPU that we're targeting for
  870. * this skb. We also have this receive thread locked,
  871. * so we're free to queue skbs into its queue.
  872. */
  873. __skb_queue_tail(&fps->fcoe_rx_list, skb);
  874. if (fps->fcoe_rx_list.qlen == 1)
  875. wake_up_process(fps->thread);
  876. spin_unlock_bh(&fps->fcoe_rx_list.lock);
  877. return 0;
  878. err:
  879. fc_lport_get_stats(lp)->ErrorFrames++;
  880. err2:
  881. kfree_skb(skb);
  882. return -1;
  883. }
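/*
 * Editor's note on the CPU selection in fcoe_rcv(): for frames belonging to
 * an exchange we originated (FC_FC_EX_CTX set in the received frame), the
 * low bits of the OX_ID pick the receive CPU, so a response is processed on
 * the same CPU that allocated the exchange.  For example, with four online
 * CPUs and an fc_cpu_mask of 0x3, OX_ID 0x0c35 maps to CPU 0x0c35 & 0x3 == 1
 * (values chosen purely for illustration).
 */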
  884. /**
  885. * fcoe_start_io() - pass the skb to the netdev to start transmission
  886. * @skb: the skb to be xmitted
  887. *
  888. * Returns: 0 for success
  889. */
  890. static inline int fcoe_start_io(struct sk_buff *skb)
  891. {
  892. int rc;
  893. skb_get(skb);
  894. rc = dev_queue_xmit(skb);
  895. if (rc != 0)
  896. return rc;
  897. kfree_skb(skb);
  898. return 0;
  899. }
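/*
 * Editor's note on the reference counting above: skb_get() takes an extra
 * reference before dev_queue_xmit(), which consumes one reference whether
 * or not the transmit succeeds.  On failure the caller therefore still
 * holds a valid skb and can park it on the pending queue for a retry; on
 * success the extra reference is dropped here with kfree_skb().
 */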
  900. /**
  901. * fcoe_get_paged_crc_eof() - attach a page fragment for the CRC/EOF trailer
  902. * @skb: the skb to be xmitted
  903. * @tlen: trailer length
  904. *
  905. * Returns: 0 for success
  906. */
  907. static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  908. {
  909. struct fcoe_percpu_s *fps;
  910. struct page *page;
  911. fps = &get_cpu_var(fcoe_percpu);
  912. page = fps->crc_eof_page;
  913. if (!page) {
  914. page = alloc_page(GFP_ATOMIC);
  915. if (!page) {
  916. put_cpu_var(fcoe_percpu);
  917. return -ENOMEM;
  918. }
  919. fps->crc_eof_page = page;
  920. fps->crc_eof_offset = 0;
  921. }
  922. get_page(page);
  923. skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
  924. fps->crc_eof_offset, tlen);
  925. skb->len += tlen;
  926. skb->data_len += tlen;
  927. skb->truesize += tlen;
  928. fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
  929. if (fps->crc_eof_offset >= PAGE_SIZE) {
  930. fps->crc_eof_page = NULL;
  931. fps->crc_eof_offset = 0;
  932. put_page(page);
  933. }
  934. put_cpu_var(fcoe_percpu);
  935. return 0;
  936. }
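/*
 * Editor's note: the per-CPU crc_eof page is shared by many frames; each
 * call above reserves tlen bytes at crc_eof_offset and takes its own page
 * reference via get_page(), so the page stays allocated until every frame
 * that points into it has been freed.  Once the offset reaches PAGE_SIZE
 * the page is retired and a fresh one is allocated on the next call.
 */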
  937. /**
  938. * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
  939. * @fp: the fc_frame containing data to be checksummed
  940. *
  941. * This uses crc32() to calculate the CRC over the frame's linear and paged data.
  942. * Returns: the 32-bit CRC
  943. */
  944. u32 fcoe_fc_crc(struct fc_frame *fp)
  945. {
  946. struct sk_buff *skb = fp_skb(fp);
  947. struct skb_frag_struct *frag;
  948. unsigned char *data;
  949. unsigned long off, len, clen;
  950. u32 crc;
  951. unsigned i;
  952. crc = crc32(~0, skb->data, skb_headlen(skb));
  953. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  954. frag = &skb_shinfo(skb)->frags[i];
  955. off = frag->page_offset;
  956. len = frag->size;
  957. while (len > 0) {
  958. clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
  959. data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
  960. KM_SKB_DATA_SOFTIRQ);
  961. crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
  962. kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
  963. off += clen;
  964. len -= clen;
  965. }
  966. }
  967. return crc;
  968. }
  969. /**
  970. * fcoe_xmit() - FCoE frame transmit function
  971. * @lp: the associated local port (fc_lport)
  972. * @fp: the fc_frame to be transmitted
  973. *
  974. * Return : 0 for success
  975. */
  976. int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
  977. {
  978. int wlen;
  979. u32 crc;
  980. struct ethhdr *eh;
  981. struct fcoe_crc_eof *cp;
  982. struct sk_buff *skb;
  983. struct fcoe_dev_stats *stats;
  984. struct fc_frame_header *fh;
  985. unsigned int hlen; /* header length implies the version */
  986. unsigned int tlen; /* trailer length */
  987. unsigned int elen; /* eth header, may include vlan */
  988. struct fcoe_port *port;
  989. u8 sof, eof;
  990. struct fcoe_hdr *hp;
  991. WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
  992. port = lport_priv(lp);
  993. fh = fc_frame_header_get(fp);
  994. skb = fp_skb(fp);
  995. wlen = skb->len / FCOE_WORD_TO_BYTE;
  996. if (!lp->link_up) {
  997. kfree_skb(skb);
  998. return 0;
  999. }
  1000. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
  1001. fcoe_ctlr_els_send(&port->ctlr, skb))
  1002. return 0;
  1003. sof = fr_sof(fp);
  1004. eof = fr_eof(fp);
  1005. elen = sizeof(struct ethhdr);
  1006. hlen = sizeof(struct fcoe_hdr);
  1007. tlen = sizeof(struct fcoe_crc_eof);
  1008. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  1009. /* crc offload */
  1010. if (likely(lp->crc_offload)) {
  1011. skb->ip_summed = CHECKSUM_PARTIAL;
  1012. skb->csum_start = skb_headroom(skb);
  1013. skb->csum_offset = skb->len;
  1014. crc = 0;
  1015. } else {
  1016. skb->ip_summed = CHECKSUM_NONE;
  1017. crc = fcoe_fc_crc(fp);
  1018. }
  1019. /* copy port crc and eof to the skb buff */
  1020. if (skb_is_nonlinear(skb)) {
  1021. skb_frag_t *frag;
  1022. if (fcoe_get_paged_crc_eof(skb, tlen)) {
  1023. kfree_skb(skb);
  1024. return -ENOMEM;
  1025. }
  1026. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  1027. cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
  1028. + frag->page_offset;
  1029. } else {
  1030. cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
  1031. }
  1032. memset(cp, 0, sizeof(*cp));
  1033. cp->fcoe_eof = eof;
  1034. cp->fcoe_crc32 = cpu_to_le32(~crc);
  1035. if (skb_is_nonlinear(skb)) {
  1036. kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
  1037. cp = NULL;
  1038. }
  1039. /* adjust skb network/transport offsets to match mac/fcoe/port */
  1040. skb_push(skb, elen + hlen);
  1041. skb_reset_mac_header(skb);
  1042. skb_reset_network_header(skb);
  1043. skb->mac_len = elen;
  1044. skb->protocol = htons(ETH_P_FCOE);
  1045. skb->dev = port->netdev;
  1046. /* fill up mac and fcoe headers */
  1047. eh = eth_hdr(skb);
  1048. eh->h_proto = htons(ETH_P_FCOE);
  1049. if (port->ctlr.map_dest)
  1050. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  1051. else
  1052. /* insert GW address */
  1053. memcpy(eh->h_dest, port->ctlr.dest_addr, ETH_ALEN);
  1054. if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN))
  1055. memcpy(eh->h_source, port->ctlr.ctl_src_addr, ETH_ALEN);
  1056. else
  1057. memcpy(eh->h_source, port->ctlr.data_src_addr, ETH_ALEN);
  1058. hp = (struct fcoe_hdr *)(eh + 1);
  1059. memset(hp, 0, sizeof(*hp));
  1060. if (FC_FCOE_VER)
  1061. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  1062. hp->fcoe_sof = sof;
  1063. /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
  1064. if (lp->seq_offload && fr_max_payload(fp)) {
  1065. skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
  1066. skb_shinfo(skb)->gso_size = fr_max_payload(fp);
  1067. } else {
  1068. skb_shinfo(skb)->gso_type = 0;
  1069. skb_shinfo(skb)->gso_size = 0;
  1070. }
  1071. /* update tx stats: regardless if LLD fails */
  1072. stats = fc_lport_get_stats(lp);
  1073. stats->TxFrames++;
  1074. stats->TxWords += wlen;
  1075. /* send down to lld */
  1076. fr_dev(fp) = lp;
  1077. if (port->fcoe_pending_queue.qlen)
  1078. fcoe_check_wait_queue(lp, skb);
  1079. else if (fcoe_start_io(skb))
  1080. fcoe_check_wait_queue(lp, skb);
  1081. return 0;
  1082. }
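/*
 * Editor's note: the on-wire layout assembled by fcoe_xmit() is
 *
 *   [ethhdr][fcoe_hdr: ver + SOF][FC header + payload][fcoe_crc_eof: CRC32 + EOF]
 *
 * The Ethernet and FCoE headers are pushed in front of the FC frame already
 * held in the skb, while the CRC/EOF trailer is either appended to the
 * linear data or, for non-linear skbs, placed in the shared per-CPU page
 * obtained from fcoe_get_paged_crc_eof().
 */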
  1083. /**
  1084. * fcoe_percpu_receive_thread() - recv thread per cpu
  1085. * @arg: ptr to the fcoe per cpu struct
  1086. *
  1087. * Return: 0 for success
  1088. */
  1089. int fcoe_percpu_receive_thread(void *arg)
  1090. {
  1091. struct fcoe_percpu_s *p = arg;
  1092. u32 fr_len;
  1093. struct fc_lport *lp;
  1094. struct fcoe_rcv_info *fr;
  1095. struct fcoe_dev_stats *stats;
  1096. struct fc_frame_header *fh;
  1097. struct sk_buff *skb;
  1098. struct fcoe_crc_eof crc_eof;
  1099. struct fc_frame *fp;
  1100. u8 *mac = NULL;
  1101. struct fcoe_port *port;
  1102. struct fcoe_hdr *hp;
  1103. set_user_nice(current, -20);
  1104. while (!kthread_should_stop()) {
  1105. spin_lock_bh(&p->fcoe_rx_list.lock);
  1106. while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
  1107. set_current_state(TASK_INTERRUPTIBLE);
  1108. spin_unlock_bh(&p->fcoe_rx_list.lock);
  1109. schedule();
  1110. set_current_state(TASK_RUNNING);
  1111. if (kthread_should_stop())
  1112. return 0;
  1113. spin_lock_bh(&p->fcoe_rx_list.lock);
  1114. }
  1115. spin_unlock_bh(&p->fcoe_rx_list.lock);
  1116. fr = fcoe_dev_from_skb(skb);
  1117. lp = fr->fr_dev;
  1118. if (unlikely(lp == NULL)) {
  1119. FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
  1120. kfree_skb(skb);
  1121. continue;
  1122. }
  1123. FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
  1124. "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
  1125. skb->len, skb->data_len,
  1126. skb->head, skb->data, skb_tail_pointer(skb),
  1127. skb_end_pointer(skb), skb->csum,
  1128. skb->dev ? skb->dev->name : "<NULL>");
  1129. /*
  1130. * Save source MAC address before discarding header.
  1131. */
  1132. port = lport_priv(lp);
  1133. if (skb_is_nonlinear(skb))
  1134. skb_linearize(skb); /* not ideal */
  1135. mac = eth_hdr(skb)->h_source;
  1136. /*
  1137. * Frame length checks and setting up the header pointers
  1138. * was done in fcoe_rcv already.
  1139. */
  1140. hp = (struct fcoe_hdr *) skb_network_header(skb);
  1141. fh = (struct fc_frame_header *) skb_transport_header(skb);
  1142. stats = fc_lport_get_stats(lp);
  1143. if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
  1144. if (stats->ErrorFrames < 5)
  1145. printk(KERN_WARNING "fcoe: FCoE version "
  1146. "mismatch: The frame has "
  1147. "version %x, but the "
  1148. "initiator supports version "
  1149. "%x\n", FC_FCOE_DECAPS_VER(hp),
  1150. FC_FCOE_VER);
  1151. stats->ErrorFrames++;
  1152. kfree_skb(skb);
  1153. continue;
  1154. }
  1155. skb_pull(skb, sizeof(struct fcoe_hdr));
  1156. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  1157. stats->RxFrames++;
  1158. stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
  1159. fp = (struct fc_frame *)skb;
  1160. fc_frame_init(fp);
  1161. fr_dev(fp) = lp;
  1162. fr_sof(fp) = hp->fcoe_sof;
  1163. /* Copy out the CRC and EOF trailer for access */
  1164. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  1165. kfree_skb(skb);
  1166. continue;
  1167. }
  1168. fr_eof(fp) = crc_eof.fcoe_eof;
  1169. fr_crc(fp) = crc_eof.fcoe_crc32;
  1170. if (pskb_trim(skb, fr_len)) {
  1171. kfree_skb(skb);
  1172. continue;
  1173. }
  1174. /*
  1175. * The CRC is only checked here when no offload is available; for
  1176. * solicited FCP data the check is instead deferred to the FCP layer,
  1177. * which verifies it during the copy.
  1178. */
  1179. if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
  1180. fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
  1181. else
  1182. fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
  1183. fh = fc_frame_header_get(fp);
  1184. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  1185. fh->fh_type == FC_TYPE_FCP) {
  1186. fc_exch_recv(lp, fp);
  1187. continue;
  1188. }
  1189. if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
  1190. if (le32_to_cpu(fr_crc(fp)) !=
  1191. ~crc32(~0, skb->data, fr_len)) {
  1192. if (stats->InvalidCRCCount < 5)
  1193. printk(KERN_WARNING "fcoe: dropping "
  1194. "frame with CRC error\n");
  1195. stats->InvalidCRCCount++;
  1196. stats->ErrorFrames++;
  1197. fc_frame_free(fp);
  1198. continue;
  1199. }
  1200. fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
  1201. }
  1202. if (unlikely(port->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
  1203. fcoe_ctlr_recv_flogi(&port->ctlr, fp, mac)) {
  1204. fc_frame_free(fp);
  1205. continue;
  1206. }
  1207. fc_exch_recv(lp, fp);
  1208. }
  1209. return 0;
  1210. }
  1211. /**
  1212. * fcoe_check_wait_queue() - attempt to clear the transmit backlog
  1213. * @lp: the fc_lport
  1214. * @skb: optional skb to queue before draining; may be NULL
  1215. *
  1216. * This drains the pending queue: each skb is dequeued from the head of
  1217. * the queue and handed to fcoe_start_io(). If a transmit fails, the skb
  1218. * is put back at the head of the queue and draining stops until the
  1219. * timer or the next transmit retries the backlog.
  1220. *
  1221. * The pending queue is used when an skb transmit fails; the skb is kept
  1222. * on the queue and drained later by the timer function or the next transmit.
  1223. */
  1224. static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
  1225. {
  1226. struct fcoe_port *port = lport_priv(lp);
  1227. int rc;
  1228. spin_lock_bh(&port->fcoe_pending_queue.lock);
  1229. if (skb)
  1230. __skb_queue_tail(&port->fcoe_pending_queue, skb);
  1231. if (port->fcoe_pending_queue_active)
  1232. goto out;
  1233. port->fcoe_pending_queue_active = 1;
  1234. while (port->fcoe_pending_queue.qlen) {
  1235. /* keep qlen > 0 until fcoe_start_io succeeds */
  1236. port->fcoe_pending_queue.qlen++;
  1237. skb = __skb_dequeue(&port->fcoe_pending_queue);
  1238. spin_unlock_bh(&port->fcoe_pending_queue.lock);
  1239. rc = fcoe_start_io(skb);
  1240. spin_lock_bh(&port->fcoe_pending_queue.lock);
  1241. if (rc) {
  1242. __skb_queue_head(&port->fcoe_pending_queue, skb);
  1243. /* undo temporary increment above */
  1244. port->fcoe_pending_queue.qlen--;
  1245. break;
  1246. }
  1247. /* undo temporary increment above */
  1248. port->fcoe_pending_queue.qlen--;
  1249. }
  1250. if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
  1251. lp->qfull = 0;
  1252. if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
  1253. mod_timer(&port->timer, jiffies + 2);
  1254. port->fcoe_pending_queue_active = 0;
  1255. out:
  1256. if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
  1257. lp->qfull = 1;
  1258. spin_unlock_bh(&port->fcoe_pending_queue.lock);
  1259. return;
  1260. }
  1261. /**
  1262. * fcoe_dev_setup() - setup link change notification interface
  1263. */
  1264. static void fcoe_dev_setup(void)
  1265. {
  1266. register_netdevice_notifier(&fcoe_notifier);
  1267. }
  1268. /**
  1269. * fcoe_dev_cleanup() - cleanup link change notification interface
  1270. */
  1271. static void fcoe_dev_cleanup(void)
  1272. {
  1273. unregister_netdevice_notifier(&fcoe_notifier);
  1274. }
  1275. /**
  1276. * fcoe_device_notification() - netdev event notification callback
  1277. * @notifier: context of the notification
  1278. * @event: type of event
  1279. * @ptr: the net_device that the event applies to
  1280. *
  1281. * This function is called by the ethernet driver in case of link change event
  1282. *
  1283. * Returns: 0 for success
  1284. */
  1285. static int fcoe_device_notification(struct notifier_block *notifier,
  1286. ulong event, void *ptr)
  1287. {
  1288. struct fc_lport *lp = NULL;
  1289. struct net_device *netdev = ptr;
  1290. struct fcoe_interface *fcoe;
  1291. struct fcoe_port *port = NULL;
  1292. struct fcoe_dev_stats *stats;
  1293. u32 link_possible = 1;
  1294. u32 mfs;
  1295. int rc = NOTIFY_OK;
  1296. read_lock(&fcoe_hostlist_lock);
  1297. list_for_each_entry(fcoe, &fcoe_hostlist, list) {
  1298. port = fcoe->priv;
  1299. if (port->netdev == netdev) {
  1300. lp = port->ctlr.lp;
  1301. break;
  1302. }
  1303. }
  1304. read_unlock(&fcoe_hostlist_lock);
  1305. if (lp == NULL) {
  1306. rc = NOTIFY_DONE;
  1307. goto out;
  1308. }
  1309. switch (event) {
  1310. case NETDEV_DOWN:
  1311. case NETDEV_GOING_DOWN:
  1312. link_possible = 0;
  1313. break;
  1314. case NETDEV_UP:
  1315. case NETDEV_CHANGE:
  1316. break;
  1317. case NETDEV_CHANGEMTU:
  1318. mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
  1319. sizeof(struct fcoe_crc_eof));
  1320. if (mfs >= FC_MIN_MAX_FRAME)
  1321. fc_set_mfs(lp, mfs);
  1322. break;
  1323. case NETDEV_REGISTER:
  1324. break;
  1325. default:
  1326. FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
  1327. "from netdev netlink\n", event);
  1328. }
  1329. if (link_possible && !fcoe_link_ok(lp))
  1330. fcoe_ctlr_link_up(&port->ctlr);
  1331. else if (fcoe_ctlr_link_down(&port->ctlr)) {
  1332. stats = fc_lport_get_stats(lp);
  1333. stats->LinkFailureCount++;
  1334. fcoe_clean_pending_queue(lp);
  1335. }
  1336. out:
  1337. return rc;
  1338. }
  1339. /**
  1340. * fcoe_if_to_netdev() - parse a name buffer to get netdev
  1341. * @buffer: incoming buffer to be copied
  1342. *
  1343. * Returns: NULL or ptr to net_device
  1344. */
  1345. static struct net_device *fcoe_if_to_netdev(const char *buffer)
  1346. {
  1347. char *cp;
  1348. char ifname[IFNAMSIZ + 2];
  1349. if (buffer) {
  1350. strlcpy(ifname, buffer, IFNAMSIZ);
  1351. cp = ifname + strlen(ifname);
  1352. while (--cp >= ifname && *cp == '\n')
  1353. *cp = '\0';
  1354. return dev_get_by_name(&init_net, ifname);
  1355. }
  1356. return NULL;
  1357. }
  1358. /**
  1359. * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
  1360. * @netdev: the target netdev
  1361. *
  1362. * Returns: ptr to the struct module, NULL for failure
  1363. */
  1364. static struct module *
  1365. fcoe_netdev_to_module_owner(const struct net_device *netdev)
  1366. {
  1367. struct device *dev;
  1368. if (!netdev)
  1369. return NULL;
  1370. dev = netdev->dev.parent;
  1371. if (!dev)
  1372. return NULL;
  1373. if (!dev->driver)
  1374. return NULL;
  1375. return dev->driver->owner;
  1376. }
  1377. /**
  1378. * fcoe_ethdrv_get() - Hold the Ethernet driver
  1379. * @netdev: the target netdev
  1380. *
  1381. * Holds the Ethernet driver module by try_module_get() for
  1382. * the corresponding netdev.
  1383. *
  1384. * Returns: 0 for success
  1385. */
  1386. static int fcoe_ethdrv_get(const struct net_device *netdev)
  1387. {
  1388. struct module *owner;
  1389. owner = fcoe_netdev_to_module_owner(netdev);
  1390. if (owner) {
  1391. FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
  1392. module_name(owner));
  1393. return try_module_get(owner);
  1394. }
  1395. return -ENODEV;
  1396. }
  1397. /**
  1398. * fcoe_ethdrv_put() - Release the Ethernet driver
  1399. * @netdev: the target netdev
  1400. *
  1401. * Releases the Ethernet driver module by module_put for
  1402. * the corresponding netdev.
  1403. *
  1404. * Returns: 0 for success
  1405. */
  1406. static int fcoe_ethdrv_put(const struct net_device *netdev)
  1407. {
  1408. struct module *owner;
  1409. owner = fcoe_netdev_to_module_owner(netdev);
  1410. if (owner) {
  1411. FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
  1412. module_name(owner));
  1413. module_put(owner);
  1414. return 0;
  1415. }
  1416. return -ENODEV;
  1417. }
  1418. /**
  1419. * fcoe_destroy() - handles the destroy from sysfs
  1420. * @buffer: expected to be an eth if name
  1421. * @kp: associated kernel param
  1422. *
  1423. * Returns: 0 for success
  1424. */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
        struct net_device *netdev;
        struct fc_lport *lport;
        int rc;

        netdev = fcoe_if_to_netdev(buffer);
        if (!netdev) {
                rc = -ENODEV;
                goto out_nodev;
        }
        /* look for existing lport */
        lport = fcoe_hostlist_lookup(netdev);
        if (!lport) {
                rc = -ENODEV;
                goto out_putdev;
        }
        fcoe_if_destroy(lport);
        fcoe_ethdrv_put(netdev);
        rc = 0;
out_putdev:
        dev_put(netdev);
out_nodev:
        return rc;
}

/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an Ethernet interface name
 * @kp: associated kernel param
 *
 * Returns: 0 for success, -ENODEV if the netdev cannot be found,
 * -EEXIST if an lport already exists for it, -EIO on create failure.
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
        int rc;
        struct fc_lport *lport;
        struct net_device *netdev;

        netdev = fcoe_if_to_netdev(buffer);
        if (!netdev) {
                rc = -ENODEV;
                goto out_nodev;
        }
        /* look for existing lport */
        if (fcoe_hostlist_lookup(netdev)) {
                rc = -EEXIST;
                goto out_putdev;
        }
        fcoe_ethdrv_get(netdev);

        lport = fcoe_if_create(netdev, &netdev->dev);
        if (IS_ERR(lport)) {
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
                fcoe_ethdrv_put(netdev);
                rc = -EIO;
                goto out_putdev;
        }
        rc = 0;
out_putdev:
        dev_put(netdev);
out_nodev:
        return rc;
}

module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe instance on the net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe instance on the net device passed in.");
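
/*
 * Usage sketch (illustrative only): the two writable module parameters
 * above appear under /sys/module/fcoe/parameters/ once the module is
 * loaded, so an FCoE instance can be created or torn down on a given
 * interface (the name "eth2" below is just an example) with:
 *
 *      echo eth2 > /sys/module/fcoe/parameters/create
 *      echo eth2 > /sys/module/fcoe/parameters/destroy
 */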

/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for
 * 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently per-priority pause is not determinable using
 * ethtool, so we should not be restrictive until that problem is resolved
 * (see the illustrative sketch after this function).
 *
 * Returns: 0 if the link is OK for use by FCoE, -1 otherwise.
 */
int fcoe_link_ok(struct fc_lport *lp)
{
        struct fcoe_port *port = lport_priv(lp);
        struct net_device *dev = port->netdev;
        struct ethtool_cmd ecmd = { ETHTOOL_GSET };

        if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
            (!dev_ethtool_get_settings(dev, &ecmd))) {
                lp->link_supported_speeds &=
                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
                                      SUPPORTED_1000baseT_Full))
                        lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
                if (ecmd.supported & SUPPORTED_10000baseT_Full)
                        lp->link_supported_speeds |=
                                FC_PORTSPEED_10GBIT;
                if (ecmd.speed == SPEED_1000)
                        lp->link_speed = FC_PORTSPEED_1GBIT;
                if (ecmd.speed == SPEED_10000)
                        lp->link_speed = FC_PORTSPEED_10GBIT;
                return 0;
        }
        return -1;
}
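
/*
 * Illustrative sketch only, tied to the "checking for PAUSE support"
 * note above: if plain PAUSE (rather than per-priority pause) were an
 * acceptable gate, the driver's flow-control settings could be queried
 * through the existing ethtool_ops interface along these lines.
 * Whether this is the right policy is exactly the open question raised
 * in the comment above.
 *
 *      struct ethtool_pauseparam pause = { .cmd = ETHTOOL_GPAUSEPARAM };
 *
 *      if (dev->ethtool_ops && dev->ethtool_ops->get_pauseparam) {
 *              dev->ethtool_ops->get_pauseparam(dev, &pause);
 *              if (!pause.rx_pause || !pause.tx_pause)
 *                      return -1;      // link up but flow control is off
 *      }
 */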

/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
        struct fcoe_percpu_s *pp;
        struct fcoe_rcv_info *fr;
        struct sk_buff_head *list;
        struct sk_buff *skb, *next;
        struct sk_buff *head;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                pp = &per_cpu(fcoe_percpu, cpu);
                spin_lock_bh(&pp->fcoe_rx_list.lock);
                list = &pp->fcoe_rx_list;
                head = list->next;
                for (skb = head; skb != (struct sk_buff *)list;
                     skb = next) {
                        next = skb->next;
                        fr = fcoe_dev_from_skb(skb);
                        if (fr->fr_dev == lp) {
                                __skb_unlink(skb, list);
                                kfree_skb(skb);
                        }
                }
                spin_unlock_bh(&pp->fcoe_rx_list.lock);
        }
}

/**
 * fcoe_clean_pending_queue() - Dequeue and free all pending skbs
 * @lp: the corresponding fc_lport
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
        struct fcoe_port *port = lport_priv(lp);
        struct sk_buff *skb;

        spin_lock_bh(&port->fcoe_pending_queue.lock);
        while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
                /* drop the queue lock while freeing each skb */
                spin_unlock_bh(&port->fcoe_pending_queue.lock);
                kfree_skb(skb);
                spin_lock_bh(&port->fcoe_pending_queue.lock);
        }
        spin_unlock_bh(&port->fcoe_pending_queue.lock);
}

/**
 * fcoe_reset() - Reset the fcoe lport for the given Scsi_Host
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
        struct fc_lport *lport = shost_priv(shost);
        fc_lport_reset(lport);
        return 0;
}

/**
 * fcoe_hostlist_lookup_port() - find the fcoe interface for a given net device
 * @dev: ptr to the net_device to look up
 *
 * Called with fcoe_hostlist_lock held.
 *
 * Returns: NULL or the located fcoe_interface
 */
static struct fcoe_interface *
fcoe_hostlist_lookup_port(const struct net_device *dev)
{
        struct fcoe_interface *fcoe;

        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
                if (fcoe->priv->netdev == dev)
                        return fcoe;
        }
        return NULL;
}

/**
 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
 * @netdev: ptr to net_device
 *
 * Returns: NULL or the fc_lport associated with @netdev
 */
struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
        struct fcoe_interface *fcoe;

        read_lock(&fcoe_hostlist_lock);
        fcoe = fcoe_hostlist_lookup_port(netdev);
        read_unlock(&fcoe_hostlist_lock);
        return (fcoe) ? fcoe->priv->ctlr.lp : NULL;
}

/**
 * fcoe_hostlist_add() - Add a lport to lports list
 * @lport: ptr to the fc_lport to be added
 *
 * Called with the write fcoe_hostlist_lock held (see the caller
 * sketch after this function).
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_add(const struct fc_lport *lport)
{
        struct fcoe_interface *fcoe;
        struct fcoe_port *port;

        fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
        if (!fcoe) {
                port = lport_priv(lport);
                fcoe = port->fcoe;
                list_add_tail(&fcoe->list, &fcoe_hostlist);
        }
        return 0;
}
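
/*
 * Illustrative caller sketch only: per the comment above, a caller of
 * fcoe_hostlist_add() is expected to hold the write side of
 * fcoe_hostlist_lock, roughly like this ("lport" is just a placeholder
 * for whichever lport is being registered):
 *
 *      write_lock_bh(&fcoe_hostlist_lock);
 *      fcoe_hostlist_add(lport);
 *      write_unlock_bh(&fcoe_hostlist_lock);
 */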

/**
 * fcoe_hostlist_remove() - remove a lport from lports list
 * @lport: ptr to the fc_lport to be removed
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_remove(const struct fc_lport *lport)
{
        struct fcoe_interface *fcoe;

        write_lock_bh(&fcoe_hostlist_lock);
        fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
        BUG_ON(!fcoe);
        list_del(&fcoe->list);
        write_unlock_bh(&fcoe_hostlist_lock);

        return 0;
}

/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Returns: 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
        unsigned int cpu;
        int rc = 0;
        struct fcoe_percpu_s *p;

        for_each_possible_cpu(cpu) {
                p = &per_cpu(fcoe_percpu, cpu);
                skb_queue_head_init(&p->fcoe_rx_list);
        }

        for_each_online_cpu(cpu)
                fcoe_percpu_thread_create(cpu);

        /* Register a notifier so per-CPU receive threads track CPU hotplug */
        rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
        if (rc)
                goto out_free;

        /* Setup link change notification */
        fcoe_dev_setup();

        rc = fcoe_if_init();
        if (rc)
                goto out_free;

        return 0;

out_free:
        for_each_online_cpu(cpu) {
                fcoe_percpu_thread_destroy(cpu);
        }

        return rc;
}
module_init(fcoe_init);

/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Returns: none
 */
static void __exit fcoe_exit(void)
{
        unsigned int cpu;
        struct fcoe_interface *fcoe, *tmp;

        fcoe_dev_cleanup();

        /* releases the associated fcoe hosts */
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list)
                fcoe_if_destroy(fcoe->priv->ctlr.lp);

        unregister_hotcpu_notifier(&fcoe_cpu_notifier);

        for_each_online_cpu(cpu)
                fcoe_percpu_thread_destroy(cpu);

        /* detach from scsi transport */
        fcoe_if_exit();
}
module_exit(fcoe_exit);