fcoe.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172
  1. /*
  2. * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. * Maintained at www.Open-FCoE.org
  18. */
  19. #include <linux/module.h>
  20. #include <linux/version.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/ethtool.h>
  25. #include <linux/if_ether.h>
  26. #include <linux/if_vlan.h>
  27. #include <linux/crc32.h>
  28. #include <linux/cpu.h>
  29. #include <linux/fs.h>
  30. #include <linux/sysfs.h>
  31. #include <linux/ctype.h>
  32. #include <scsi/scsi_tcq.h>
  33. #include <scsi/scsicam.h>
  34. #include <scsi/scsi_transport.h>
  35. #include <scsi/scsi_transport_fc.h>
  36. #include <net/rtnetlink.h>
  37. #include <scsi/fc/fc_encaps.h>
  38. #include <scsi/fc/fc_fip.h>
  39. #include <scsi/libfc.h>
  40. #include <scsi/fc_frame.h>
  41. #include <scsi/libfcoe.h>
  42. #include "fcoe.h"
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* Performance tuning parameters for fcoe */
/* I/Os smaller than this are not offloaded via DDP (see fcoe_oem_match()) */
static unsigned int fcoe_ddp_min;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
		 "Direct Data Placement (DDP).");

/* serializes create/destroy/configuration of FCoE instances */
DEFINE_MUTEX(fcoe_config_mutex);

/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);

/* fcoe host list */
/* must only be accessed under the RTNL mutex */
LIST_HEAD(fcoe_hostlist);

/* per-CPU receive-thread state (skb queue + kthread) */
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *shost);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *arg);
static void fcoe_clean_pending_queue(struct fc_lport *lp);
static void fcoe_percpu_clean(struct fc_lport *lp);
static int fcoe_link_ok(struct fc_lport *lp);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
static struct fcoe_interface *
fcoe_hostlist_lookup_port(const struct net_device *dev);

/* notification function from net device */
/* receives netdev events (link up/down, MTU change, unregister, ...) */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};
/* FC transport template registered in fcoe_init(); used by fcoe_shost_config() */
static struct scsi_transport_template *scsi_transport_fcoe_sw;

/*
 * fc_transport attribute/callback table: which sysfs attributes the FC
 * transport class exposes for this driver and which libfc helpers back them.
 */
struct fc_function_template fcoe_transport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	/* per-rport private data size for libfc */
	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = fcoe_reset,
	.terminate_rport_io = fc_rport_terminate_io,
};

/* SCSI midlayer host template; entry points are the generic libfc handlers */
static struct scsi_host_template fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = "FCoE Driver",
	.proc_name = FCOE_NAME,
	.queuecommand = fc_queuecommand,
	.eh_abort_handler = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 3,
	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
};

/* forward declaration: FIP ethertype receive handler (defined below) */
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *ptype,
			 struct net_device *orig_dev);
  129. /**
  130. * fcoe_interface_setup()
  131. * @fcoe: new fcoe_interface
  132. * @netdev : ptr to the associated netdevice struct
  133. *
  134. * Returns : 0 for success
  135. * Locking: must be called with the RTNL mutex held
  136. */
  137. static int fcoe_interface_setup(struct fcoe_interface *fcoe,
  138. struct net_device *netdev)
  139. {
  140. struct fcoe_ctlr *fip = &fcoe->ctlr;
  141. struct netdev_hw_addr *ha;
  142. u8 flogi_maddr[ETH_ALEN];
  143. const struct net_device_ops *ops;
  144. fcoe->netdev = netdev;
  145. /* Let LLD initialize for FCoE */
  146. ops = netdev->netdev_ops;
  147. if (ops->ndo_fcoe_enable) {
  148. if (ops->ndo_fcoe_enable(netdev))
  149. FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
  150. " specific feature for LLD.\n");
  151. }
  152. /* Do not support for bonding device */
  153. if ((netdev->priv_flags & IFF_MASTER_ALB) ||
  154. (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
  155. (netdev->priv_flags & IFF_MASTER_8023AD)) {
  156. return -EOPNOTSUPP;
  157. }
  158. /* look for SAN MAC address, if multiple SAN MACs exist, only
  159. * use the first one for SPMA */
  160. rcu_read_lock();
  161. for_each_dev_addr(netdev, ha) {
  162. if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
  163. (is_valid_ether_addr(fip->ctl_src_addr))) {
  164. memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
  165. fip->spma = 1;
  166. break;
  167. }
  168. }
  169. rcu_read_unlock();
  170. /* setup Source Mac Address */
  171. if (!fip->spma)
  172. memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
  173. /*
  174. * Add FCoE MAC address as second unicast MAC address
  175. * or enter promiscuous mode if not capable of listening
  176. * for multiple unicast MACs.
  177. */
  178. memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
  179. dev_unicast_add(netdev, flogi_maddr);
  180. if (fip->spma)
  181. dev_unicast_add(netdev, fip->ctl_src_addr);
  182. dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
  183. /*
  184. * setup the receive function from ethernet driver
  185. * on the ethertype for the given device
  186. */
  187. fcoe->fcoe_packet_type.func = fcoe_rcv;
  188. fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
  189. fcoe->fcoe_packet_type.dev = netdev;
  190. dev_add_pack(&fcoe->fcoe_packet_type);
  191. fcoe->fip_packet_type.func = fcoe_fip_recv;
  192. fcoe->fip_packet_type.type = htons(ETH_P_FIP);
  193. fcoe->fip_packet_type.dev = netdev;
  194. dev_add_pack(&fcoe->fip_packet_type);
  195. return 0;
  196. }
/* forward declarations for the FIP callbacks wired up in
 * fcoe_interface_create() and for the deferred-destroy work handler */
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr);
static u8 *fcoe_get_src_mac(struct fc_lport *lport);
static void fcoe_destroy_work(struct work_struct *work);
  201. /**
  202. * fcoe_interface_create()
  203. * @netdev: network interface
  204. *
  205. * Returns: pointer to a struct fcoe_interface or NULL on error
  206. */
  207. static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
  208. {
  209. struct fcoe_interface *fcoe;
  210. fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
  211. if (!fcoe) {
  212. FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
  213. return NULL;
  214. }
  215. dev_hold(netdev);
  216. kref_init(&fcoe->kref);
  217. /*
  218. * Initialize FIP.
  219. */
  220. fcoe_ctlr_init(&fcoe->ctlr);
  221. fcoe->ctlr.send = fcoe_fip_send;
  222. fcoe->ctlr.update_mac = fcoe_update_src_mac;
  223. fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
  224. fcoe_interface_setup(fcoe, netdev);
  225. return fcoe;
  226. }
/**
 * fcoe_interface_cleanup() - clean up netdev configurations
 * @fcoe: fcoe interface to detach from its net_device
 *
 * Undoes fcoe_interface_setup(): unregisters both packet handlers, removes
 * the unicast/multicast filters, and tells the LLD that FCoE is done.
 *
 * Caller must be holding the RTNL mutex
 */
void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	u8 flogi_maddr[ETH_ALEN];
	const struct net_device_ops *ops;

	/*
	 * Don't listen for Ethernet packets anymore.
	 * synchronize_net() ensures that the packet handlers are not running
	 * on another CPU. dev_remove_pack() would do that, this calls the
	 * unsyncronized version __dev_remove_pack() to avoid multiple delays.
	 */
	__dev_remove_pack(&fcoe->fcoe_packet_type);
	__dev_remove_pack(&fcoe->fip_packet_type);
	synchronize_net();

	/* Delete secondary MAC addresses */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_delete(netdev, flogi_maddr);
	/* the SPMA control address was only added when fip->spma was set */
	if (fip->spma)
		dev_unicast_delete(netdev, fip->ctl_src_addr);
	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);

	/* Tell the LLD we are done w/ FCoE */
	ops = netdev->netdev_ops;
	if (ops->ndo_fcoe_disable) {
		if (ops->ndo_fcoe_disable(netdev))
			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
					" specific feature for LLD.\n");
	}
}
  262. /**
  263. * fcoe_interface_release() - fcoe_port kref release function
  264. * @kref: embedded reference count in an fcoe_interface struct
  265. */
  266. static void fcoe_interface_release(struct kref *kref)
  267. {
  268. struct fcoe_interface *fcoe;
  269. struct net_device *netdev;
  270. fcoe = container_of(kref, struct fcoe_interface, kref);
  271. netdev = fcoe->netdev;
  272. /* tear-down the FCoE controller */
  273. fcoe_ctlr_destroy(&fcoe->ctlr);
  274. kfree(fcoe);
  275. dev_put(netdev);
  276. }
/**
 * fcoe_interface_get() - take a reference on an fcoe_interface
 * @fcoe: interface whose kref is incremented
 */
static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
{
	kref_get(&fcoe->kref);
}
/**
 * fcoe_interface_put() - drop a reference on an fcoe_interface
 * @fcoe: interface whose kref is decremented
 *
 * Frees the interface via fcoe_interface_release() when the count hits zero.
 */
static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
{
	kref_put(&fcoe->kref, fcoe_interface_release);
}
  293. /**
  294. * fcoe_fip_recv - handle a received FIP frame.
  295. * @skb: the receive skb
  296. * @dev: associated &net_device
  297. * @ptype: the &packet_type structure which was used to register this handler.
  298. * @orig_dev: original receive &net_device, in case @dev is a bond.
  299. *
  300. * Returns: 0 for success
  301. */
  302. static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
  303. struct packet_type *ptype,
  304. struct net_device *orig_dev)
  305. {
  306. struct fcoe_interface *fcoe;
  307. fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
  308. fcoe_ctlr_recv(&fcoe->ctlr, skb);
  309. return 0;
  310. }
  311. /**
  312. * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
  313. * @fip: FCoE controller.
  314. * @skb: FIP Packet.
  315. */
  316. static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  317. {
  318. skb->dev = fcoe_from_ctlr(fip)->netdev;
  319. dev_queue_xmit(skb);
  320. }
  321. /**
  322. * fcoe_update_src_mac() - Update Ethernet MAC filters.
  323. * @lport: libfc lport
  324. * @addr: Unicast MAC address to add.
  325. *
  326. * Remove any previously-set unicast MAC filter.
  327. * Add secondary FCoE MAC address filter for our OUI.
  328. */
  329. static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
  330. {
  331. struct fcoe_port *port = lport_priv(lport);
  332. struct fcoe_interface *fcoe = port->fcoe;
  333. rtnl_lock();
  334. if (!is_zero_ether_addr(port->data_src_addr))
  335. dev_unicast_delete(fcoe->netdev, port->data_src_addr);
  336. if (!is_zero_ether_addr(addr))
  337. dev_unicast_add(fcoe->netdev, addr);
  338. memcpy(port->data_src_addr, addr, ETH_ALEN);
  339. rtnl_unlock();
  340. }
  341. /**
  342. * fcoe_get_src_mac() - return the Ethernet source address for an lport
  343. * @lport: libfc lport
  344. */
  345. static u8 *fcoe_get_src_mac(struct fc_lport *lport)
  346. {
  347. struct fcoe_port *port = lport_priv(lport);
  348. return port->data_src_addr;
  349. }
  350. /**
  351. * fcoe_lport_config() - sets up the fc_lport
  352. * @lp: ptr to the fc_lport
  353. *
  354. * Returns: 0 for success
  355. */
  356. static int fcoe_lport_config(struct fc_lport *lp)
  357. {
  358. lp->link_up = 0;
  359. lp->qfull = 0;
  360. lp->max_retry_count = 3;
  361. lp->max_rport_retry_count = 3;
  362. lp->e_d_tov = 2 * 1000; /* FC-FS default */
  363. lp->r_a_tov = 2 * 2 * 1000;
  364. lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  365. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  366. fc_lport_init_stats(lp);
  367. /* lport fc_lport related configuration */
  368. fc_lport_config(lp);
  369. /* offload related configuration */
  370. lp->crc_offload = 0;
  371. lp->seq_offload = 0;
  372. lp->lro_enabled = 0;
  373. lp->lro_xid = 0;
  374. lp->lso_max = 0;
  375. return 0;
  376. }
  377. /**
  378. * fcoe_queue_timer() - fcoe queue timer
  379. * @lp: the fc_lport pointer
  380. *
  381. * Calls fcoe_check_wait_queue on timeout
  382. *
  383. */
  384. static void fcoe_queue_timer(ulong lp)
  385. {
  386. fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
  387. }
/**
 * fcoe_netdev_config() - Set up netdev for SW FCoE
 * @lp : ptr to the fc_lport
 * @netdev : ptr to the associated netdevice struct
 *
 * Derives the max frame size from the netdev MTU, enables whatever offloads
 * the device advertises (SG, FC CRC, LSO, DDP), initializes the pending
 * transmit queue and its flush timer, and assigns WWNN/WWPN derived from
 * the netdev MAC address.
 *
 * Must be called after fcoe_lport_config() as it will use lport mutex
 *
 * Returns : 0 for success
 */
static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
{
	u32 mfs;
	u64 wwnn, wwpn;
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	/* Setup lport private data to point to fcoe softc */
	port = lport_priv(lp);
	fcoe = port->fcoe;

	/*
	 * Determine max frame size based on underlying device and optional
	 * user-configured limit. If the MFS is too low, fcoe_link_ok()
	 * will return 0, so do this first.
	 */
	mfs = netdev->mtu;
	if (netdev->features & NETIF_F_FCOE_MTU) {
		/* device supports the larger dedicated FCoE MTU */
		mfs = FCOE_MTU;
		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
	}
	/* subtract FCoE encapsulation overhead (header + CRC/EOF trailer) */
	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
	if (fc_set_mfs(lp, mfs))
		return -EINVAL;

	/* offload features support */
	if (netdev->features & NETIF_F_SG)
		lp->sg_supp = 1;

	if (netdev->features & NETIF_F_FCOE_CRC) {
		lp->crc_offload = 1;
		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
	}
	if (netdev->features & NETIF_F_FSO) {
		lp->seq_offload = 1;
		lp->lso_max = netdev->gso_max_size;
		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
				lp->lso_max);
	}
	if (netdev->fcoe_ddp_xid) {
		/* non-zero max DDP xid means the LLD supports DDP offload */
		lp->lro_enabled = 1;
		lp->lro_xid = netdev->fcoe_ddp_xid;
		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
				lp->lro_xid);
	}

	/* deferred-transmit queue and its flush timer */
	skb_queue_head_init(&port->fcoe_pending_queue);
	port->fcoe_pending_queue_active = 0;
	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);

	/* derive world-wide names from the netdev MAC address */
	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
	fc_set_wwnn(lp, wwnn);
	/* XXX - 3rd arg needs to be vlan id */
	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
	fc_set_wwpn(lp, wwpn);

	return 0;
}
  448. /**
  449. * fcoe_shost_config() - Sets up fc_lport->host
  450. * @lp : ptr to the fc_lport
  451. * @shost : ptr to the associated scsi host
  452. * @dev : device associated to scsi host
  453. *
  454. * Must be called after fcoe_lport_config() and fcoe_netdev_config()
  455. *
  456. * Returns : 0 for success
  457. */
  458. static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
  459. struct device *dev)
  460. {
  461. int rc = 0;
  462. /* lport scsi host config */
  463. lp->host->max_lun = FCOE_MAX_LUN;
  464. lp->host->max_id = FCOE_MAX_FCP_TARGET;
  465. lp->host->max_channel = 0;
  466. lp->host->transportt = scsi_transport_fcoe_sw;
  467. /* add the new host to the SCSI-ml */
  468. rc = scsi_add_host(lp->host, dev);
  469. if (rc) {
  470. FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
  471. "error on scsi_add_host\n");
  472. return rc;
  473. }
  474. sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
  475. FCOE_NAME, FCOE_VERSION,
  476. fcoe_netdev(lp)->name);
  477. return 0;
  478. }
  479. /*
  480. * fcoe_oem_match() - match for read types IO
  481. * @fp: the fc_frame for new IO.
  482. *
  483. * Returns : true for read types IO, otherwise returns false.
  484. */
  485. bool fcoe_oem_match(struct fc_frame *fp)
  486. {
  487. return fc_fcp_is_read(fr_fsp(fp)) &&
  488. (fr_fsp(fp)->data_len > fcoe_ddp_min);
  489. }
/**
 * fcoe_em_config() - allocates em for this lport
 * @lp: the fcoe that em is to allocated for
 *
 * Sets up the exchange managers for the lport: an optional offload EM
 * (shared across all lports on the same real ethernet device, so DDP xids
 * stay consistent through a VLAN) followed by the regular EM covering the
 * remaining xid range.
 *
 * Must be called with fcoe_config_mutex held (it walks fcoe_hostlist).
 *
 * Returns : 0 on success
 */
static inline int fcoe_em_config(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct fcoe_interface *fcoe = port->fcoe;
	struct fcoe_interface *oldfcoe = NULL;
	struct net_device *old_real_dev, *cur_real_dev;
	u16 min_xid = FCOE_MIN_XID;
	u16 max_xid = FCOE_MAX_XID;

	/*
	 * Check if need to allocate an em instance for
	 * offload exchange ids to be shared across all VN_PORTs/lport.
	 */
	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
		lp->lro_xid = 0;
		goto skip_oem;
	}

	/*
	 * Reuse existing offload em instance in case
	 * it is already allocated on real eth device
	 */
	/* resolve VLAN devices to the underlying real device */
	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
	else
		cur_real_dev = fcoe->netdev;

	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
		else
			old_real_dev = oldfcoe->netdev;

		if (cur_real_dev == old_real_dev) {
			/* share the existing offload EM for this real device */
			fcoe->oem = oldfcoe->oem;
			break;
		}
	}

	if (fcoe->oem) {
		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
			printk(KERN_ERR "fcoe_em_config: failed to add "
			       "offload em:%p on interface:%s\n",
			       fcoe->oem, fcoe->netdev->name);
			return -ENOMEM;
		}
	} else {
		/* first lport on this real device: allocate the offload EM */
		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
					      FCOE_MIN_XID, lp->lro_xid,
					      fcoe_oem_match);
		if (!fcoe->oem) {
			printk(KERN_ERR "fcoe_em_config: failed to allocate "
			       "em for offload exches on interface:%s\n",
			       fcoe->netdev->name);
			return -ENOMEM;
		}
	}

	/*
	 * Exclude offload EM xid range from next EM xid range.
	 */
	min_xid += lp->lro_xid + 1;

skip_oem:
	/* regular (non-offload) exchange manager for the remaining xids */
	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
		printk(KERN_ERR "fcoe_em_config: failed to "
		       "allocate em on interface %s\n", fcoe->netdev->name);
		return -ENOMEM;
	}

	return 0;
}
/**
 * fcoe_if_destroy() - FCoE software HBA tear-down function
 * @lport: fc_lport to destroy
 *
 * Tears down in reverse order of fcoe_if_create(): fabric logout, libfc
 * teardown, timer/queue cleanup, MAC filter removal, then SCSI host and EM
 * release. The ordering is significant — receives must be stopped (via the
 * fcoe_interface_put) before the per-CPU queues are drained, and the EM is
 * freed only after the SCSI host is removed.
 */
static void fcoe_if_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");

	/* Logout of the fabric */
	fc_fabric_logoff(lport);

	/* Cleanup the fc_lport */
	fc_lport_destroy(lport);
	fc_fcp_destroy(lport);

	/* Stop the transmit retry timer */
	del_timer_sync(&port->timer);

	/* Free existing transmit skbs */
	fcoe_clean_pending_queue(lport);

	/* drop the unicast filter for the data source MAC, if one was set */
	rtnl_lock();
	if (!is_zero_ether_addr(port->data_src_addr))
		dev_unicast_delete(netdev, port->data_src_addr);
	rtnl_unlock();

	/* receives may not be stopped until after this */
	fcoe_interface_put(fcoe);

	/* Free queued packets for the per-CPU receive threads */
	fcoe_percpu_clean(lport);

	/* Detach from the scsi-ml */
	fc_remove_host(lport->host);
	scsi_remove_host(lport->host);

	/* There are no more rports or I/O, free the EM */
	fc_exch_mgr_free(lport);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(lport);

	/* Release the Scsi_Host */
	scsi_host_put(lport->host);
}
  597. /*
  598. * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
  599. * @lp: the corresponding fc_lport
  600. * @xid: the exchange id for this ddp transfer
  601. * @sgl: the scatterlist describing this transfer
  602. * @sgc: number of sg items
  603. *
  604. * Returns : 0 no ddp
  605. */
  606. static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
  607. struct scatterlist *sgl, unsigned int sgc)
  608. {
  609. struct net_device *n = fcoe_netdev(lp);
  610. if (n->netdev_ops->ndo_fcoe_ddp_setup)
  611. return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
  612. return 0;
  613. }
  614. /*
  615. * fcoe_ddp_done - calls LLD's ddp_done through net_device
  616. * @lp: the corresponding fc_lport
  617. * @xid: the exchange id for this ddp transfer
  618. *
  619. * Returns : the length of data that have been completed by ddp
  620. */
  621. static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
  622. {
  623. struct net_device *n = fcoe_netdev(lp);
  624. if (n->netdev_ops->ndo_fcoe_ddp_done)
  625. return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
  626. return 0;
  627. }
/* forward declaration for the ELS/CT send hook installed below */
static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport,
				      u32 did, struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *, struct fc_frame *, void *),
				      void *arg, u32 timeout);

/* libfc callback table: how libfc transmits frames and drives DDP offload */
static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.frame_send = fcoe_xmit,
	.ddp_setup = fcoe_ddp_setup,
	.ddp_done = fcoe_ddp_done,
	.elsct_send = fcoe_elsct_send,
};
/**
 * fcoe_if_create() - this function creates the fcoe port
 * @fcoe: fcoe_interface structure to create an fc_lport instance on
 * @parent: device pointer to be the parent in sysfs for the SCSI host
 *
 * Creates fc_lport struct and scsi_host for lport, configures lport.
 *
 * NOTE(review): relies on fcoe_config_mutex being held by the caller for
 * the EM-allocation/hostlist atomicity described in the comment below.
 *
 * Returns : The allocated fc_lport or an error pointer
 */
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent)
{
	int rc;
	struct fc_lport *lport = NULL;
	struct fcoe_port *port;
	struct Scsi_Host *shost;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Create Interface\n");

	/* Allocates the Scsi_Host together with our fcoe_port private data. */
	lport = libfc_host_alloc(&fcoe_shost_template,
				 sizeof(struct fcoe_port));
	if (!lport) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
		rc = -ENOMEM;
		goto out;
	}
	shost = lport->host;
	port = lport_priv(lport);
	port->lport = lport;
	port->fcoe = fcoe;
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);

	/* configure fc_lport, e.g., em */
	rc = fcoe_lport_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
				"interface\n");
		goto out_host_put;
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lport, netdev);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lport, shost, parent);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/*
	 * fcoe_em_alloc() and fcoe_hostlist_add() both
	 * need to be atomic with respect to other changes to the hostlist
	 * since fcoe_em_alloc() looks for an existing EM
	 * instance on host list updated by fcoe_hostlist_add().
	 *
	 * This is currently handled through the fcoe_config_mutex begin held.
	 */

	/* lport exch manager allocation */
	rc = fcoe_em_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* The returned lport holds a reference on the fcoe_interface. */
	fcoe_interface_get(fcoe);
	return lport;

out_lp_destroy:
	fc_exch_mgr_free(lport);
out_host_put:
	scsi_host_put(lport->host);
out:
	return ERR_PTR(rc);
}
  720. /**
  721. * fcoe_if_init() - attach to scsi transport
  722. *
  723. * Returns : 0 on success
  724. */
  725. static int __init fcoe_if_init(void)
  726. {
  727. /* attach to scsi transport */
  728. scsi_transport_fcoe_sw =
  729. fc_attach_transport(&fcoe_transport_function);
  730. if (!scsi_transport_fcoe_sw) {
  731. printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
  732. return -ENODEV;
  733. }
  734. return 0;
  735. }
/**
 * fcoe_if_exit() - detach from scsi transport
 *
 * Releases the transport template attached in fcoe_if_init() and clears
 * the global pointer.
 *
 * Returns : 0 on success
 */
int __exit fcoe_if_exit(void)
{
	fc_release_transport(scsi_transport_fcoe_sw);
	scsi_transport_fcoe_sw = NULL;
	return 0;
}
  747. /**
  748. * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
  749. * @cpu: cpu index for the online cpu
  750. */
  751. static void fcoe_percpu_thread_create(unsigned int cpu)
  752. {
  753. struct fcoe_percpu_s *p;
  754. struct task_struct *thread;
  755. p = &per_cpu(fcoe_percpu, cpu);
  756. thread = kthread_create(fcoe_percpu_receive_thread,
  757. (void *)p, "fcoethread/%d", cpu);
  758. if (likely(!IS_ERR(thread))) {
  759. kthread_bind(thread, cpu);
  760. wake_up_process(thread);
  761. spin_lock_bh(&p->fcoe_rx_list.lock);
  762. p->thread = thread;
  763. spin_unlock_bh(&p->fcoe_rx_list.lock);
  764. }
  765. }
/**
 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
 * @cpu: cpu index the rx thread is to be removed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */

	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	/* Detach the thread and crc_eof page under the lock; they are
	 * released at the bottom, outside of any spinlock. */
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
				 cpu, targ_cpu);
			/* Hand the backlog over to the current CPU's thread. */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot accept
			 * new skbs. Unlock the targeted CPU and drop the skbs
			 * on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
#else
	/*
	 * This a non-SMP scenario where the singular Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	/* Stop the thread and drop the crc_eof page reference last. */
	if (thread)
		kthread_stop(thread);

	if (crc_eof)
		put_page(crc_eof);
}
/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * This creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	/* hotplug notifiers pass the cpu index cast into the pointer */
	unsigned cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
		fcoe_percpu_thread_create(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
		fcoe_percpu_thread_destroy(cpu);
		break;
	default:
		/* other hotplug phases are of no interest here */
		break;
	}
	return NOTIFY_OK;
}
/* Notifier block registering fcoe_cpu_callback() for CPU hotplug events. */
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * this function will receive the packet and build fc frame and pass it up
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_interface *fcoe;
	struct fc_frame_header *fh;
	struct fcoe_percpu_s *fps;
	unsigned int cpu;

	/* our packet_type is embedded in the fcoe_interface */
	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
	lp = fcoe->ctlr.lp;
	if (unlikely(lp == NULL)) {
		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
		goto err2;
	}
	if (!lp->link_up)
		goto err2;

	FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
			"data:%p tail:%p end:%p sum:%d dev:%s",
			skb->len, skb->data_len, skb->head, skb->data,
			skb_tail_pointer(skb), skb_end_pointer(skb),
			skb->csum, skb->dev ? skb->dev->name : "<NULL>");

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	/* stash lport/ptype in the skb control block for the Rx thread */
	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;

	/*
	 * In case the incoming frame's exchange is originated from
	 * the initiator, then received frame's exchange id is ANDed
	 * with fc_cpu_mask bits to get the same cpu on which exchange
	 * was originated, otherwise just use the current cpu.
	 */
	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
	else
		cpu = smp_processor_id();

	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&fps->fcoe_rx_list.lock);
	if (unlikely(!fps->thread)) {
		/*
		 * The targeted CPU is not ready, let's target
		 * the first CPU now. For non-SMP systems this
		 * will check the same CPU twice.
		 */
		FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
				"ready for incoming skb- using first online "
				"CPU.\n");

		spin_unlock_bh(&fps->fcoe_rx_list.lock);
		cpu = first_cpu(cpu_online_map);
		fps = &per_cpu(fcoe_percpu, cpu);
		spin_lock_bh(&fps->fcoe_rx_list.lock);
		if (!fps->thread) {
			spin_unlock_bh(&fps->fcoe_rx_list.lock);
			goto err;
		}
	}

	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
	 * so we're free to queue skbs into it's queue.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	/* wake the thread only on the empty -> non-empty transition */
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
	fc_lport_get_stats(lp)->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
  978. /**
  979. * fcoe_start_io() - pass to netdev to start xmit for fcoe
  980. * @skb: the skb to be xmitted
  981. *
  982. * Returns: 0 for success
  983. */
  984. static inline int fcoe_start_io(struct sk_buff *skb)
  985. {
  986. int rc;
  987. skb_get(skb);
  988. rc = dev_queue_xmit(skb);
  989. if (rc != 0)
  990. return rc;
  991. kfree_skb(skb);
  992. return 0;
  993. }
/**
 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total len
 *
 * Appends @tlen bytes of the per-cpu crc_eof page to @skb as a paged
 * fragment. The page is shared between frames: each caller advances
 * crc_eof_offset by sizeof(struct fcoe_crc_eof), and the page is
 * retired once the offset reaches PAGE_SIZE.
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;

	/* get_cpu_var() pins us to this CPU while we touch per-cpu state */
	fps = &get_cpu_var(fcoe_percpu);
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu_var(fcoe_percpu);
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		fps->crc_eof_offset = 0;
	}

	/* the skb fragment takes its own reference on the page */
	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	/* page exhausted: drop the per-cpu reference so it can be freed */
	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu_var(fcoe_percpu);
	return 0;
}
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() to calculate the crc for port frame
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	/* CRC over the linear part first ... */
	crc = crc32(~0, skb->data, skb_headlen(skb));

	/* ... then fold in every paged fragment, one page mapping at a time */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			/* clamp to the end of the currently mapped page */
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local fcoe
 * @fp: the fc_frame to be transmitted
 *
 * Encapsulates @fp in Ethernet + FCoE headers, appends the CRC/EOF
 * trailer and hands the skb to the netdev, or to the pending queue
 * when a transmit backlog exists.
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	struct fcoe_port *port = lport_priv(lp);
	struct fcoe_interface *fcoe = port->fcoe;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);
	wlen = skb->len / FCOE_WORD_TO_BYTE;

	/* silently drop frames while the link is down */
	if (!lp->link_up) {
		kfree_skb(skb);
		return 0;
	}

	/* FIP may take over ELS requests (e.g. FLOGI) and send them itself */
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fcoe->ctlr, lp, skb))
		return 0;

	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	/* word count excludes the trailer but includes the CRC word */
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy port crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		/* trailer goes into a paged fragment from the per-cpu page */
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/port */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fcoe->netdev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fcoe->ctlr.map_dest)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);

	/* use the FIP control address while FLOGI is still outstanding */
	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lp->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
	/* update tx stats: regardless if LLD fails */
	stats = fc_lport_get_stats(lp);
	stats->TxFrames++;
	stats->TxWords += wlen;

	/* send down to lld */
	fr_dev(fp) = lp;
	if (port->fcoe_pending_queue.qlen)
		/* preserve ordering: queue behind the existing backlog */
		fcoe_check_wait_queue(lp, skb);
	else if (fcoe_start_io(skb))
		/* transmit failed: park the skb on the pending queue */
		fcoe_check_wait_queue(lp, skb);

	return 0;
}
/**
 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
 * @skb: the skb being completed.
 *
 * Used as an skb destructor: freeing the marker skb signals that the
 * per-cpu Rx queue it was placed on has been drained.
 */
static void fcoe_percpu_flush_done(struct sk_buff *skb)
{
	complete(&fcoe_flush_completion);
}
/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Dequeues skbs from this CPU's Rx list, strips the FCoE encapsulation,
 * validates version and CRC, and hands the resulting FC frame to libfc.
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_port *port;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		/* sleep until an skb is queued; woken from fcoe_rcv() */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);

		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			/* flush-marker skbs carry no lport by design */
			if (skb->destructor != fcoe_percpu_flush_done)
				FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
			kfree_skb(skb);
			continue;
		}

		FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
				"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
				skb->len, skb->data_len,
				skb->head, skb->data, skb_tail_pointer(skb),
				skb_end_pointer(skb), skb->csum,
				skb->dev ? skb->dev->name : "<NULL>");

		/*
		 * Save source MAC address before discarding header.
		 * NOTE(review): mac is assigned but not used within this
		 * function as visible here — confirm against callers/history.
		 */
		port = lport_priv(lp);
		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */
		mac = eth_hdr(skb)->h_source;

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		stats = fc_lport_get_stats(lp);
		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			/* rate-limit the warning to the first few errors */
			if (stats->ErrorFrames < 5)
				printk(KERN_WARNING "fcoe: FCoE version "
				       "mismatch: The frame has "
				       "version %x, but the "
				       "initiator supports version "
				       "%x\n", FC_FCOE_DECAPS_VER(hp),
				       FC_FCOE_VER);
			stats->ErrorFrames++;
			kfree_skb(skb);
			continue;
		}

		/* strip the FCoE header; fr_len excludes the CRC/EOF trailer */
		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
		stats->RxFrames++;
		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * We only check CRC if no offload is available and if it is
		 * it's solicited data, in which case, the FCP layer would
		 * check it during the copy.
		 */
		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		fh = fc_frame_header_get(fp);
		/* solicited FCP data bypasses the software CRC check here */
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		fc_exch_recv(lp, fp);
	}
	return 0;
}
/**
 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
 * @lp: the fc_lport
 * @skb: optional skb to append to the backlog before flushing (may be NULL)
 *
 * This empties the wait_queue, dequeue the head of the wait_queue queue
 * and calls fcoe_start_io() for each packet, if all skb have been
 * transmitted, return qlen or -1 if a error occurs, then restore
 * wait_queue and try again later.
 *
 * The wait_queue is used when the skb transmit fails. skb will go
 * in the wait_queue which will be emptied by the timer function or
 * by the next skb transmit.
 */
static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
{
	struct fcoe_port *port = lport_priv(lp);
	int rc;

	spin_lock_bh(&port->fcoe_pending_queue.lock);

	if (skb)
		__skb_queue_tail(&port->fcoe_pending_queue, skb);

	/* fcoe_pending_queue_active keeps the flush loop single-threaded */
	if (port->fcoe_pending_queue_active)
		goto out;
	port->fcoe_pending_queue_active = 1;

	while (port->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		port->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&port->fcoe_pending_queue);

		/* drop the lock across the actual transmit attempt */
		spin_unlock_bh(&port->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&port->fcoe_pending_queue.lock);

		if (rc) {
			/* transmit failed: requeue at the head and stop */
			__skb_queue_head(&port->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			port->fcoe_pending_queue.qlen--;
			break;
		}

		/* undo temporary increment above */
		port->fcoe_pending_queue.qlen--;
	}

	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	/* anything left over is retried from the port timer */
	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
		mod_timer(&port->timer, jiffies + 2);
	port->fcoe_pending_queue_active = 0;
out:
	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 1;
	spin_unlock_bh(&port->fcoe_pending_queue.lock);
	return;
}
/**
 * fcoe_dev_setup() - setup link change notification interface
 *
 * NOTE(review): the return value of register_netdevice_notifier() is
 * ignored here — confirm whether registration failure needs handling.
 */
static void fcoe_dev_setup(void)
{
	register_netdevice_notifier(&fcoe_notifier);
}
/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 *
 * Unregisters the netdev notifier installed by fcoe_dev_setup().
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
  1373. /**
  1374. * fcoe_device_notification() - netdev event notification callback
  1375. * @notifier: context of the notification
  1376. * @event: type of event
  1377. * @ptr: fixed array for output parsed ifname
  1378. *
  1379. * This function is called by the ethernet driver in case of link change event
  1380. *
  1381. * Returns: 0 for success
  1382. */
  1383. static int fcoe_device_notification(struct notifier_block *notifier,
  1384. ulong event, void *ptr)
  1385. {
  1386. struct fc_lport *lp = NULL;
  1387. struct net_device *netdev = ptr;
  1388. struct fcoe_interface *fcoe;
  1389. struct fcoe_port *port;
  1390. struct fcoe_dev_stats *stats;
  1391. u32 link_possible = 1;
  1392. u32 mfs;
  1393. int rc = NOTIFY_OK;
  1394. list_for_each_entry(fcoe, &fcoe_hostlist, list) {
  1395. if (fcoe->netdev == netdev) {
  1396. lp = fcoe->ctlr.lp;
  1397. break;
  1398. }
  1399. }
  1400. if (lp == NULL) {
  1401. rc = NOTIFY_DONE;
  1402. goto out;
  1403. }
  1404. switch (event) {
  1405. case NETDEV_DOWN:
  1406. case NETDEV_GOING_DOWN:
  1407. link_possible = 0;
  1408. break;
  1409. case NETDEV_UP:
  1410. case NETDEV_CHANGE:
  1411. break;
  1412. case NETDEV_CHANGEMTU:
  1413. if (netdev->features & NETIF_F_FCOE_MTU)
  1414. break;
  1415. mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
  1416. sizeof(struct fcoe_crc_eof));
  1417. if (mfs >= FC_MIN_MAX_FRAME)
  1418. fc_set_mfs(lp, mfs);
  1419. break;
  1420. case NETDEV_REGISTER:
  1421. break;
  1422. case NETDEV_UNREGISTER:
  1423. list_del(&fcoe->list);
  1424. port = lport_priv(fcoe->ctlr.lp);
  1425. fcoe_interface_cleanup(fcoe);
  1426. schedule_work(&port->destroy_work);
  1427. goto out;
  1428. break;
  1429. default:
  1430. FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
  1431. "from netdev netlink\n", event);
  1432. }
  1433. if (link_possible && !fcoe_link_ok(lp))
  1434. fcoe_ctlr_link_up(&fcoe->ctlr);
  1435. else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
  1436. stats = fc_lport_get_stats(lp);
  1437. stats->LinkFailureCount++;
  1438. fcoe_clean_pending_queue(lp);
  1439. }
  1440. out:
  1441. return rc;
  1442. }
  1443. /**
  1444. * fcoe_if_to_netdev() - parse a name buffer to get netdev
  1445. * @buffer: incoming buffer to be copied
  1446. *
  1447. * Returns: NULL or ptr to net_device
  1448. */
  1449. static struct net_device *fcoe_if_to_netdev(const char *buffer)
  1450. {
  1451. char *cp;
  1452. char ifname[IFNAMSIZ + 2];
  1453. if (buffer) {
  1454. strlcpy(ifname, buffer, IFNAMSIZ);
  1455. cp = ifname + strlen(ifname);
  1456. while (--cp >= ifname && *cp == '\n')
  1457. *cp = '\0';
  1458. return dev_get_by_name(&init_net, ifname);
  1459. }
  1460. return NULL;
  1461. }
/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	struct fcoe_interface *fcoe;
	struct net_device *netdev;
	int rc = 0;

	mutex_lock(&fcoe_config_mutex);
#ifdef CONFIG_FCOE_MODULE
	/*
	 * Make sure the module has been initialized, and is not about to be
	 * removed. Module paramter sysfs files are writable before the
	 * module_init function is called and after module_exit.
	 */
	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
		rc = -ENODEV;
		goto out_nodev;
	}
#endif
	/* dev_get_by_name() reference is dropped at out_putdev */
	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* unhook from the hostlist under rtnl, destroy outside of it */
	rtnl_lock();
	fcoe = fcoe_hostlist_lookup_port(netdev);
	if (!fcoe) {
		rtnl_unlock();
		rc = -ENODEV;
		goto out_putdev;
	}
	list_del(&fcoe->list);
	fcoe_interface_cleanup(fcoe);
	rtnl_unlock();
	fcoe_if_destroy(fcoe->ctlr.lp);
out_putdev:
	dev_put(netdev);
out_nodev:
	mutex_unlock(&fcoe_config_mutex);
	return rc;
}
/**
 * fcoe_destroy_work() - deferred destruction of an fcoe port
 * @work: the embedded destroy_work of the fcoe_port
 *
 * Runs from the workqueue (scheduled on NETDEV_UNREGISTER) so the lport
 * teardown happens under fcoe_config_mutex, outside notifier context.
 */
static void fcoe_destroy_work(struct work_struct *work)
{
	struct fcoe_port *port;

	port = container_of(work, struct fcoe_port, destroy_work);
	mutex_lock(&fcoe_config_mutex);
	fcoe_if_destroy(port->lport);
	mutex_unlock(&fcoe_config_mutex);
}
  1516. /**
  1517. * fcoe_create() - Handles the create call from sysfs
  1518. * @buffer: expected to be an eth if name
  1519. * @kp: associated kernel param
  1520. *
  1521. * Returns: 0 for success
  1522. */
  1523. static int fcoe_create(const char *buffer, struct kernel_param *kp)
  1524. {
  1525. int rc;
  1526. struct fcoe_interface *fcoe;
  1527. struct fc_lport *lport;
  1528. struct net_device *netdev;
  1529. mutex_lock(&fcoe_config_mutex);
  1530. #ifdef CONFIG_FCOE_MODULE
  1531. /*
  1532. * Make sure the module has been initialized, and is not about to be
  1533. * removed. Module paramter sysfs files are writable before the
  1534. * module_init function is called and after module_exit.
  1535. */
  1536. if (THIS_MODULE->state != MODULE_STATE_LIVE) {
  1537. rc = -ENODEV;
  1538. goto out_nodev;
  1539. }
  1540. #endif
  1541. rtnl_lock();
  1542. netdev = fcoe_if_to_netdev(buffer);
  1543. if (!netdev) {
  1544. rc = -ENODEV;
  1545. goto out_nodev;
  1546. }
  1547. /* look for existing lport */
  1548. if (fcoe_hostlist_lookup(netdev)) {
  1549. rc = -EEXIST;
  1550. goto out_putdev;
  1551. }
  1552. fcoe = fcoe_interface_create(netdev);
  1553. if (!fcoe) {
  1554. rc = -ENOMEM;
  1555. goto out_putdev;
  1556. }
  1557. lport = fcoe_if_create(fcoe, &netdev->dev);
  1558. if (IS_ERR(lport)) {
  1559. printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
  1560. netdev->name);
  1561. rc = -EIO;
  1562. fcoe_interface_cleanup(fcoe);
  1563. goto out_free;
  1564. }
  1565. /* Make this the "master" N_Port */
  1566. fcoe->ctlr.lp = lport;
  1567. /* add to lports list */
  1568. fcoe_hostlist_add(lport);
  1569. /* start FIP Discovery and FLOGI */
  1570. lport->boot_time = jiffies;
  1571. fc_fabric_login(lport);
  1572. if (!fcoe_link_ok(lport))
  1573. fcoe_ctlr_link_up(&fcoe->ctlr);
  1574. rc = 0;
  1575. out_free:
  1576. /*
  1577. * Release from init in fcoe_interface_create(), on success lport
  1578. * should be holding a reference taken in fcoe_if_create().
  1579. */
  1580. fcoe_interface_put(fcoe);
  1581. out_putdev:
  1582. dev_put(netdev);
  1583. out_nodev:
  1584. rtnl_unlock();
  1585. mutex_unlock(&fcoe_config_mutex);
  1586. return rc;
  1587. }
  1588. module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
  1589. __MODULE_PARM_TYPE(create, "string");
  1590. MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
  1591. module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
  1592. __MODULE_PARM_TYPE(destroy, "string");
  1593. MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently Per-priority-pause is not determinable using
 * ethtool, so we shouldn't be restrictive until that problem is resolved.
 *
 * Returns: 0 if link is OK for use by FCoE.
 *
 */
int fcoe_link_ok(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct net_device *dev = port->fcoe->netdev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };

	/* link is usable when the device is up, has carrier, and its
	 * ethtool settings can be read */
	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
	    (!dev_ethtool_get_settings(dev, &ecmd))) {
		/* refresh the supported 1G/10G mask from ethtool data */
		lp->link_supported_speeds &=
			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
				      SUPPORTED_1000baseT_Full))
			lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
		if (ecmd.supported & SUPPORTED_10000baseT_Full)
			lp->link_supported_speeds |=
				FC_PORTSPEED_10GBIT;
		if (ecmd.speed == SPEED_1000)
			lp->link_speed = FC_PORTSPEED_1GBIT;
		if (ecmd.speed == SPEED_10000)
			lp->link_speed = FC_PORTSPEED_10GBIT;

		return 0;
	}
	return -1;
}
  1631. /**
  1632. * fcoe_percpu_clean() - Clear the pending skbs for an lport
  1633. * @lp: the fc_lport
  1634. *
  1635. * Must be called with fcoe_create_mutex held to single-thread completion.
  1636. *
  1637. * This flushes the pending skbs by adding a new skb to each queue and
  1638. * waiting until they are all freed. This assures us that not only are
  1639. * there no packets that will be handled by the lport, but also that any
  1640. * threads already handling packet have returned.
  1641. */
  1642. void fcoe_percpu_clean(struct fc_lport *lp)
  1643. {
  1644. struct fcoe_percpu_s *pp;
  1645. struct fcoe_rcv_info *fr;
  1646. struct sk_buff_head *list;
  1647. struct sk_buff *skb, *next;
  1648. struct sk_buff *head;
  1649. unsigned int cpu;
  1650. for_each_possible_cpu(cpu) {
  1651. pp = &per_cpu(fcoe_percpu, cpu);
  1652. spin_lock_bh(&pp->fcoe_rx_list.lock);
  1653. list = &pp->fcoe_rx_list;
  1654. head = list->next;
  1655. for (skb = head; skb != (struct sk_buff *)list;
  1656. skb = next) {
  1657. next = skb->next;
  1658. fr = fcoe_dev_from_skb(skb);
  1659. if (fr->fr_dev == lp) {
  1660. __skb_unlink(skb, list);
  1661. kfree_skb(skb);
  1662. }
  1663. }
  1664. if (!pp->thread || !cpu_online(cpu)) {
  1665. spin_unlock_bh(&pp->fcoe_rx_list.lock);
  1666. continue;
  1667. }
  1668. skb = dev_alloc_skb(0);
  1669. if (!skb) {
  1670. spin_unlock_bh(&pp->fcoe_rx_list.lock);
  1671. continue;
  1672. }
  1673. skb->destructor = fcoe_percpu_flush_done;
  1674. __skb_queue_tail(&pp->fcoe_rx_list, skb);
  1675. if (pp->fcoe_rx_list.qlen == 1)
  1676. wake_up_process(pp->thread);
  1677. spin_unlock_bh(&pp->fcoe_rx_list.lock);
  1678. wait_for_completion(&fcoe_flush_completion);
  1679. }
  1680. }
  1681. /**
  1682. * fcoe_clean_pending_queue() - Dequeue a skb and free it
  1683. * @lp: the corresponding fc_lport
  1684. *
  1685. * Returns: none
  1686. */
  1687. void fcoe_clean_pending_queue(struct fc_lport *lp)
  1688. {
  1689. struct fcoe_port *port = lport_priv(lp);
  1690. struct sk_buff *skb;
  1691. spin_lock_bh(&port->fcoe_pending_queue.lock);
  1692. while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
  1693. spin_unlock_bh(&port->fcoe_pending_queue.lock);
  1694. kfree_skb(skb);
  1695. spin_lock_bh(&port->fcoe_pending_queue.lock);
  1696. }
  1697. spin_unlock_bh(&port->fcoe_pending_queue.lock);
  1698. }
  1699. /**
  1700. * fcoe_reset() - Resets the fcoe
  1701. * @shost: shost the reset is from
  1702. *
  1703. * Returns: always 0
  1704. */
  1705. int fcoe_reset(struct Scsi_Host *shost)
  1706. {
  1707. struct fc_lport *lport = shost_priv(shost);
  1708. fc_lport_reset(lport);
  1709. return 0;
  1710. }
  1711. /**
  1712. * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
  1713. * @dev: this is currently ptr to net_device
  1714. *
  1715. * Returns: NULL or the located fcoe_port
  1716. * Locking: must be called with the RNL mutex held
  1717. */
  1718. static struct fcoe_interface *
  1719. fcoe_hostlist_lookup_port(const struct net_device *dev)
  1720. {
  1721. struct fcoe_interface *fcoe;
  1722. list_for_each_entry(fcoe, &fcoe_hostlist, list) {
  1723. if (fcoe->netdev == dev)
  1724. return fcoe;
  1725. }
  1726. return NULL;
  1727. }
  1728. /**
  1729. * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
  1730. * @netdev: ptr to net_device
  1731. *
  1732. * Returns: 0 for success
  1733. * Locking: must be called with the RTNL mutex held
  1734. */
  1735. static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
  1736. {
  1737. struct fcoe_interface *fcoe;
  1738. fcoe = fcoe_hostlist_lookup_port(netdev);
  1739. return (fcoe) ? fcoe->ctlr.lp : NULL;
  1740. }
  1741. /**
  1742. * fcoe_hostlist_add() - Add a lport to lports list
  1743. * @lp: ptr to the fc_lport to be added
  1744. *
  1745. * Returns: 0 for success
  1746. * Locking: must be called with the RTNL mutex held
  1747. */
  1748. static int fcoe_hostlist_add(const struct fc_lport *lport)
  1749. {
  1750. struct fcoe_interface *fcoe;
  1751. struct fcoe_port *port;
  1752. fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
  1753. if (!fcoe) {
  1754. port = lport_priv(lport);
  1755. fcoe = port->fcoe;
  1756. list_add_tail(&fcoe->list, &fcoe_hostlist);
  1757. }
  1758. return 0;
  1759. }
  1760. /**
  1761. * fcoe_init() - fcoe module loading initialization
  1762. *
  1763. * Returns 0 on success, negative on failure
  1764. */
  1765. static int __init fcoe_init(void)
  1766. {
  1767. unsigned int cpu;
  1768. int rc = 0;
  1769. struct fcoe_percpu_s *p;
  1770. mutex_lock(&fcoe_config_mutex);
  1771. for_each_possible_cpu(cpu) {
  1772. p = &per_cpu(fcoe_percpu, cpu);
  1773. skb_queue_head_init(&p->fcoe_rx_list);
  1774. }
  1775. for_each_online_cpu(cpu)
  1776. fcoe_percpu_thread_create(cpu);
  1777. /* Initialize per CPU interrupt thread */
  1778. rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
  1779. if (rc)
  1780. goto out_free;
  1781. /* Setup link change notification */
  1782. fcoe_dev_setup();
  1783. rc = fcoe_if_init();
  1784. if (rc)
  1785. goto out_free;
  1786. mutex_unlock(&fcoe_config_mutex);
  1787. return 0;
  1788. out_free:
  1789. for_each_online_cpu(cpu) {
  1790. fcoe_percpu_thread_destroy(cpu);
  1791. }
  1792. mutex_unlock(&fcoe_config_mutex);
  1793. return rc;
  1794. }
  1795. module_init(fcoe_init);
  1796. /**
  1797. * fcoe_exit() - fcoe module unloading cleanup
  1798. *
  1799. * Returns 0 on success, negative on failure
  1800. */
  1801. static void __exit fcoe_exit(void)
  1802. {
  1803. unsigned int cpu;
  1804. struct fcoe_interface *fcoe, *tmp;
  1805. struct fcoe_port *port;
  1806. mutex_lock(&fcoe_config_mutex);
  1807. fcoe_dev_cleanup();
  1808. /* releases the associated fcoe hosts */
  1809. rtnl_lock();
  1810. list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
  1811. list_del(&fcoe->list);
  1812. port = lport_priv(fcoe->ctlr.lp);
  1813. fcoe_interface_cleanup(fcoe);
  1814. schedule_work(&port->destroy_work);
  1815. }
  1816. rtnl_unlock();
  1817. unregister_hotcpu_notifier(&fcoe_cpu_notifier);
  1818. for_each_online_cpu(cpu)
  1819. fcoe_percpu_thread_destroy(cpu);
  1820. mutex_unlock(&fcoe_config_mutex);
  1821. /* flush any asyncronous interface destroys,
  1822. * this should happen after the netdev notifier is unregistered */
  1823. flush_scheduled_work();
  1824. /* detach from scsi transport
  1825. * must happen after all destroys are done, therefor after the flush */
  1826. fcoe_if_exit();
  1827. }
  1828. module_exit(fcoe_exit);
  1829. /**
  1830. * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
  1831. * @seq: active sequence in the FLOGI or FDISC exchange
  1832. * @fp: response frame, or error encoded in a pointer (timeout)
  1833. * @arg: pointer the the fcoe_ctlr structure
  1834. *
  1835. * This handles MAC address managment for FCoE, then passes control on to
  1836. * the libfc FLOGI response handler.
  1837. */
  1838. static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
  1839. {
  1840. struct fcoe_ctlr *fip = arg;
  1841. struct fc_exch *exch = fc_seq_exch(seq);
  1842. struct fc_lport *lport = exch->lp;
  1843. u8 *mac;
  1844. if (IS_ERR(fp))
  1845. goto done;
  1846. mac = fr_cb(fp)->granted_mac;
  1847. if (is_zero_ether_addr(mac)) {
  1848. /* pre-FIP */
  1849. mac = eth_hdr(&fp->skb)->h_source;
  1850. if (fcoe_ctlr_recv_flogi(fip, lport, fp, mac)) {
  1851. fc_frame_free(fp);
  1852. return;
  1853. }
  1854. } else {
  1855. /* FIP, libfcoe has already seen it */
  1856. fip->update_mac(lport, fr_cb(fp)->granted_mac);
  1857. }
  1858. done:
  1859. fc_lport_flogi_resp(seq, fp, lport);
  1860. }
  1861. /**
  1862. * fcoe_logo_resp() - FCoE specific LOGO response handler
  1863. * @seq: active sequence in the LOGO exchange
  1864. * @fp: response frame, or error encoded in a pointer (timeout)
  1865. * @arg: pointer the the fcoe_ctlr structure
  1866. *
  1867. * This handles MAC address managment for FCoE, then passes control on to
  1868. * the libfc LOGO response handler.
  1869. */
  1870. static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
  1871. {
  1872. struct fcoe_ctlr *fip = arg;
  1873. struct fc_exch *exch = fc_seq_exch(seq);
  1874. struct fc_lport *lport = exch->lp;
  1875. static u8 zero_mac[ETH_ALEN] = { 0 };
  1876. if (!IS_ERR(fp))
  1877. fip->update_mac(lport, zero_mac);
  1878. fc_lport_logo_resp(seq, fp, lport);
  1879. }
  1880. /**
  1881. * fcoe_elsct_send - FCoE specific ELS handler
  1882. *
  1883. * This does special case handling of FIP encapsualted ELS exchanges for FCoE,
  1884. * using FCoE specific response handlers and passing the FIP controller as
  1885. * the argument (the lport is still available from the exchange).
  1886. *
  1887. * Most of the work here is just handed off to the libfc routine.
  1888. */
  1889. static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport,
  1890. u32 did, struct fc_frame *fp, unsigned int op,
  1891. void (*resp)(struct fc_seq *, struct fc_frame *, void *),
  1892. void *arg, u32 timeout)
  1893. {
  1894. struct fcoe_port *port = lport_priv(lport);
  1895. struct fcoe_interface *fcoe = port->fcoe;
  1896. struct fcoe_ctlr *fip = &fcoe->ctlr;
  1897. struct fc_frame_header *fh = fc_frame_header_get(fp);
  1898. switch (op) {
  1899. case ELS_FLOGI:
  1900. case ELS_FDISC:
  1901. return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
  1902. fip, timeout);
  1903. case ELS_LOGO:
  1904. /* only hook onto fabric logouts, not port logouts */
  1905. if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
  1906. break;
  1907. return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
  1908. fip, timeout);
  1909. }
  1910. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  1911. }