libfcoe.c
/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <net/rtnetlink.h>

#include <scsi/fc/fc_encaps.h>

#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include <scsi/libfcoe.h>
#include <scsi/fc_transport_fcoe.h>
static int debug_fcoe;

#define FCOE_MAX_QUEUE_DEPTH  256
#define FCOE_LOW_QUEUE_DEPTH  32

/* destination address mode */
#define FCOE_GW_ADDR_MODE     0x00
#define FCOE_FCOUI_ADDR_MODE  0x01

#define FCOE_WORD_TO_BYTE     4

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL");

/* fcoe host list */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];

/* Function Prototypes */
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
#ifdef CONFIG_HOTPLUG_CPU
static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
#endif /* CONFIG_HOTPLUG_CPU */
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};

/**
 * fcoe_create_percpu_data() - creates the associated cpu data
 * @cpu: index for the cpu where fcoe cpu data will be created
 *
 * create percpu stats block, from cpu add notifier
 *
 * Returns: none
 */
static void fcoe_create_percpu_data(int cpu)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;

	write_lock_bh(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		if (lp->dev_stats[cpu] == NULL)
			lp->dev_stats[cpu] =
				kzalloc(sizeof(struct fcoe_dev_stats),
					GFP_KERNEL);
	}
	write_unlock_bh(&fcoe_hostlist_lock);
}

/**
 * fcoe_destroy_percpu_data() - destroys the associated cpu data
 * @cpu: index for the cpu where fcoe cpu data will be destroyed
 *
 * destroy percpu stats block called by cpu add/remove notifier
 *
 * Returns: none
 */
static void fcoe_destroy_percpu_data(int cpu)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;

	write_lock_bh(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		kfree(lp->dev_stats[cpu]);
		lp->dev_stats[cpu] = NULL;
	}
	write_unlock_bh(&fcoe_hostlist_lock);
}

/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * this creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		fcoe_create_percpu_data(cpu);
		break;
	case CPU_DEAD:
		fcoe_destroy_percpu_data(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * this function will receive the packet and build fc frame and pass it up
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned short oxid;
	int cpu_idx;
	struct fcoe_percpu_s *fps;

	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");
	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;
	cpu_idx = 0;
#ifdef CONFIG_SMP
	/*
	 * The incoming frame exchange id (oxid) is ANDed with the number of
	 * online cpus to get cpu_idx, which is then used to select a per-cpu
	 * kernel thread from fcoe_percpu.  If that cpu is offline or has no
	 * kernel thread, cpu_idx falls back to the first online cpu index.
	 */
	cpu_idx = oxid & (num_online_cpus() - 1);
	if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
		cpu_idx = first_cpu(cpu_online_map);
#endif
	fps = fcoe_percpu[cpu_idx];

	spin_lock_bh(&fps->fcoe_rx_list.lock);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
#ifdef CONFIG_SMP
	stats = lp->dev_stats[smp_processor_id()];
#else
	stats = lp->dev_stats[0];
#endif
	if (stats)
		stats->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);

/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int rc;

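	/*
	 * Hold an extra reference across dev_queue_xmit(): the stack
	 * consumes one reference whether or not the transmit succeeds, so
	 * on failure the caller still owns the skb and can requeue it; on
	 * success the extra reference is dropped below.
	 */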
	skb_get(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}

/**
 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total len
 *
 * Returns: 0 for success
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;
	int cpu_idx;

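	/*
	 * CRC/EOF trailers for successive frames share one per-CPU page:
	 * each caller appends its trailer at the current offset and takes
	 * its own page reference, and the page is retired (put) once the
	 * offset runs off the end of the page.
	 */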
	cpu_idx = get_cpu();
	fps = fcoe_percpu[cpu_idx];
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu();
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		WARN_ON(fps->crc_eof_offset != 0);
	}

	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu();
	return 0;
}

/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() to calculate the crc for fc frame
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

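	/*
	 * Accumulate the CRC over the linear area first, then walk each
	 * paged fragment.  Fragments are mapped one page-bounded chunk at
	 * a time with kmap_atomic(), since a fragment's offset need not be
	 * page aligned.
	 */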
	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
EXPORT_SYMBOL_GPL(fcoe_fc_crc);

/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);
	/*
	 * if it is a flogi then we need to learn gw-addr
	 * and my own fcid
	 */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving
			 * the FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_802_3);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* update tx stats: regardless if LLD fails */
	stats = lp->dev_stats[smp_processor_id()];
	if (stats) {
		stats->TxFrames++;
		stats->TxWords += wlen;
	}

	/* send down to lld */
	fr_dev(fp) = lp;
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	if (rc) {
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);

/**
 * fcoe_percpu_receive_thread() - recv thread per cpu
 * @arg: ptr to the fcoe per cpu struct
 *
 * Return: 0 for success
 */
int fcoe_percpu_receive_thread(void *arg)
{
	struct fcoe_percpu_s *p = arg;
	u32 fr_len;
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	struct sk_buff *skb;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	struct fcoe_softc *fc;
	struct fcoe_hdr *hp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {

		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
		spin_unlock_bh(&p->fcoe_rx_list.lock);
		fr = fcoe_dev_from_skb(skb);
		lp = fr->fr_dev;
		if (unlikely(lp == NULL)) {
			FC_DBG("invalid HBA Structure");
			kfree_skb(skb);
			continue;
		}

		stats = lp->dev_stats[smp_processor_id()];

		if (unlikely(debug_fcoe)) {
			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
			       "tail:%p end:%p sum:%d dev:%s",
			       skb->len, skb->data_len,
			       skb->head, skb->data, skb_tail_pointer(skb),
			       skb_end_pointer(skb), skb->csum,
			       skb->dev ? skb->dev->name : "<NULL>");
		}

		/*
		 * Save source MAC address before discarding header.
		 */
		fc = lport_priv(lp);
		if (unlikely(fc->flogi_progress))
			mac = eth_hdr(skb)->h_source;

		if (skb_is_nonlinear(skb))
			skb_linearize(skb);	/* not ideal */

		/*
		 * Frame length checks and setting up the header pointers
		 * was done in fcoe_rcv already.
		 */
		hp = (struct fcoe_hdr *) skb_network_header(skb);
		fh = (struct fc_frame_header *) skb_transport_header(skb);

		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
			if (stats) {
				if (stats->ErrorFrames < 5)
					FC_DBG("unknown FCoE version %x",
					       FC_FCOE_DECAPS_VER(hp));
				stats->ErrorFrames++;
			}
			kfree_skb(skb);
			continue;
		}

		skb_pull(skb, sizeof(struct fcoe_hdr));
		fr_len = skb->len - sizeof(struct fcoe_crc_eof);

		if (stats) {
			stats->RxFrames++;
			stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
		}

		fp = (struct fc_frame *)skb;
		fc_frame_init(fp);
		fr_dev(fp) = lp;
		fr_sof(fp) = hp->fcoe_sof;

		/* Copy out the CRC and EOF trailer for access */
		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
			kfree_skb(skb);
			continue;
		}
		fr_eof(fp) = crc_eof.fcoe_eof;
		fr_crc(fp) = crc_eof.fcoe_crc32;
		if (pskb_trim(skb, fr_len)) {
			kfree_skb(skb);
			continue;
		}

		/*
		 * We only check the CRC here if no offload is available and
		 * the frame is not solicited data; for solicited data the
		 * FCP layer checks it during the copy.
		 */
		if (lp->crc_offload)
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		else
			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;

		fh = fc_frame_header_get(fp);
		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
		    fh->fh_type == FC_TYPE_FCP) {
			fc_exch_recv(lp, lp->emp, fp);
			continue;
		}
		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
			if (le32_to_cpu(fr_crc(fp)) !=
			    ~crc32(~0, skb->data, fr_len)) {
				if (debug_fcoe || stats->InvalidCRCCount < 5)
					printk(KERN_WARNING "fcoe: dropping "
					       "frame with CRC error\n");
				stats->InvalidCRCCount++;
				stats->ErrorFrames++;
				fc_frame_free(fp);
				continue;
			}
			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
		}
		/* non flogi and non data exchanges are handled here */
		if (unlikely(fc->flogi_progress))
			fcoe_recv_flogi(fc, fp, mac);
		fc_exch_recv(lp, lp->emp, fp);
	}
	return 0;
}

/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This is responsible for parsing the flogi response and setting the
 * corresponding mac address for the initiator, either OUI based or GW based.
 *
 * Returns: none
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}

/**
 * fcoe_watchdog() - fcoe timer callback
 * @vp: unused timer argument
 *
 * This checks the pending queue length for fcoe and sets lport qfull
 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
 * fcoe_hostlist.
 *
 * Returns: none
 */
void fcoe_watchdog(ulong vp)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->lp)
			fcoe_check_wait_queue(fc->lp);
	}
	read_unlock(&fcoe_hostlist_lock);

	fcoe_timer.expires = jiffies + (1 * HZ);
	add_timer(&fcoe_timer);
}

/**
 * fcoe_check_wait_queue() - attempt to drain the fcoe pending xmit queue
 * @lp: the fc_lport for this queue
 *
 * This dequeues packets from the head of the pending queue and calls
 * fcoe_start_io() for each one.  If a transmit fails, the skb is restored
 * to the head of the queue and the drain stops, to be retried later by
 * the timer function or by the next transmit.
 *
 * The pending queue is used when an skb transmit fails; the skb stays
 * queued until the queue is emptied by the timer function or by the
 * next skb transmit.
 *
 * Returns: the pending queue length, or -1 if a drain is already active
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;
	int rc = -1;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	if (fc->fcoe_pending_queue_active)
		goto out;
	fc->fcoe_pending_queue_active = 1;
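	/*
	 * The queue length is bumped before each dequeue below so that
	 * concurrent fcoe_xmit() callers still see a non-empty queue while
	 * a frame is in flight outside the lock, and keep queueing behind
	 * it instead of transmitting out of order.
	 */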
	while (fc->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		fc->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&fc->fcoe_pending_queue);

		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);

		if (rc) {
			__skb_queue_head(&fc->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			fc->fcoe_pending_queue.qlen--;
			break;
		}

		/* undo temporary increment above */
		fc->fcoe_pending_queue.qlen--;
	}

	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	fc->fcoe_pending_queue_active = 0;
	rc = fc->fcoe_pending_queue.qlen;
out:
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return rc;
}

/**
 * fcoe_dev_setup() - setup link change notification interface
 */
static void fcoe_dev_setup(void)
{
	/*
	 * here we register a netdev notifier to
	 * monitor the link state
	 */
	register_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}

/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: ptr to the net_device the event is for
 *
 * This function is called by the ethernet driver in case of link change event
 *
 * Returns: NOTIFY_OK, or NOTIFY_DONE if the device is not an fcoe port
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = lp->dev_stats[smp_processor_id()];
			if (stats)
				stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}

/**
 * fcoe_if_to_netdev() - parse a name buffer to get netdev
 * @buffer: incoming buffer holding the interface name
 *
 * Returns: NULL or ptr to net_device
 */
static struct net_device *fcoe_if_to_netdev(const char *buffer)
{
	char *cp;
	char ifname[IFNAMSIZ + 2];

	if (buffer) {
		strlcpy(ifname, buffer, IFNAMSIZ);
		cp = ifname + strlen(ifname);
		while (--cp >= ifname && *cp == '\n')
			*cp = '\0';
		return dev_get_by_name(&init_net, ifname);
	}
	return NULL;
}

/**
 * fcoe_netdev_to_module_owner() - finds the nic driver module of the netdev
 * @netdev: the target netdev
 *
 * Returns: ptr to the struct module, NULL for failure
 */
static struct module *
fcoe_netdev_to_module_owner(const struct net_device *netdev)
{
	struct device *dev;

	if (!netdev)
		return NULL;

	dev = netdev->dev.parent;
	if (!dev)
		return NULL;

	if (!dev->driver)
		return NULL;

	return dev->driver->owner;
}

/**
 * fcoe_ethdrv_get() - Hold the Ethernet driver
 * @netdev: the target netdev
 *
 * Holds the Ethernet driver module by try_module_get() for
 * the corresponding netdev.
 *
 * Returns: nonzero for success, -ENODEV if no owner module is found
 */
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
		       module_name(owner), netdev->name);
		return try_module_get(owner);
	}
	return -ENODEV;
}

/**
 * fcoe_ethdrv_put() - Release the Ethernet driver
 * @netdev: the target netdev
 *
 * Releases the Ethernet driver module by module_put for
 * the corresponding netdev.
 *
 * Returns: 0 for success, -ENODEV if no owner module is found
 */
static int fcoe_ethdrv_put(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
		       module_name(owner), netdev->name);
		module_put(owner);
		return 0;
	}
	return -ENODEV;
}

/**
 * fcoe_destroy() - handles the destroy from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (!fcoe_hostlist_lookup(netdev)) {
		rc = -ENODEV;
		goto out_putdev;
	}
	/* pass to transport */
	rc = fcoe_transport_release(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
		       netdev->name);
		rc = -EIO;
		goto out_putdev;
	}
	fcoe_ethdrv_put(netdev);
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}

/**
 * fcoe_create() - Handles the create call from sysfs
 * @buffer: expected to be an eth if name
 * @kp: associated kernel param
 *
 * Returns: 0 for success
 */
static int fcoe_create(const char *buffer, struct kernel_param *kp)
{
	int rc;
	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev) {
		rc = -ENODEV;
		goto out_nodev;
	}
	/* look for existing lport */
	if (fcoe_hostlist_lookup(netdev)) {
		rc = -EEXIST;
		goto out_putdev;
	}
	fcoe_ethdrv_get(netdev);

	/* pass to transport */
	rc = fcoe_transport_attach(netdev);
	if (rc) {
		printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
		       netdev->name);
		fcoe_ethdrv_put(netdev);
		rc = -EIO;
		goto out_putdev;
	}
	rc = 0;
out_putdev:
	dev_put(netdev);
out_nodev:
	return rc;
}

module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");

/**
 * fcoe_link_ok() - Check if link is ok for the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Any permanently-disqualifying conditions have been previously checked.
 * This also updates the speed setting, which may change with link for 100/1000.
 *
 * This function should probably be checking for PAUSE support at some point
 * in the future. Currently Per-priority-pause is not determinable using
 * ethtool, so we shouldn't be restrictive until that problem is resolved.
 *
 * Returns: 0 if link is OK for use by FCoE.
 *
 */
int fcoe_link_ok(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct net_device *dev = fc->real_dev;
	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
	int rc = 0;

	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
		dev = fc->phys_dev;
		if (dev->ethtool_ops->get_settings) {
			dev->ethtool_ops->get_settings(dev, &ecmd);
			lp->link_supported_speeds &=
				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
					      SUPPORTED_1000baseT_Full))
				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
			if (ecmd.supported & SUPPORTED_10000baseT_Full)
				lp->link_supported_speeds |=
					FC_PORTSPEED_10GBIT;
			if (ecmd.speed == SPEED_1000)
				lp->link_speed = FC_PORTSPEED_1GBIT;
			if (ecmd.speed == SPEED_10000)
				lp->link_speed = FC_PORTSPEED_10GBIT;
		}
	} else
		rc = -1;

	return rc;
}
EXPORT_SYMBOL_GPL(fcoe_link_ok);

/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	int idx;
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;

	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			pp = fcoe_percpu[idx];
			spin_lock_bh(&pp->fcoe_rx_list.lock);
			list = &pp->fcoe_rx_list;
			head = list->next;
			for (skb = head; skb != (struct sk_buff *)list;
			     skb = next) {
				next = skb->next;
				fr = fcoe_dev_from_skb(skb);
				if (fr->fr_dev == lp) {
					__skb_unlink(skb, list);
					kfree_skb(skb);
				}
			}
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
		}
	}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);

/**
 * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
 * @lp: the corresponding fc_lport
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);

/**
 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Returns: ptr to Scsi_Host
 * TODO: to libfc?
 */
static inline struct Scsi_Host *
libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
}

/**
 * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc
 * @sht: ptr to the scsi host template
 * @priv_size: size of private data after fc_lport
 *
 * Returns: ptr to Scsi_Host
 */
struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
{
	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
}
EXPORT_SYMBOL_GPL(fcoe_host_alloc);

/**
 * fcoe_reset() - Resets the fcoe
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);
	fc_lport_reset(lport);
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);

/**
 * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
 * @mac: mac address
 * @scheme: naming scheme (NAA nibble, 1 or 2)
 * @port: port indicator for converting
 *
 * Returns: u64 fc world wide name
 */
u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
		      unsigned int scheme, unsigned int port)
{
	u64 wwn;
	u64 host_mac;

	/* The MAC is in network order; it fills only the low 48 bits */
	host_mac = ((u64) mac[0] << 40) |
		((u64) mac[1] << 32) |
		((u64) mac[2] << 24) |
		((u64) mac[3] << 16) |
		((u64) mac[4] << 8) |
		(u64) mac[5];

	WARN_ON(host_mac >= (1ULL << 48));
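	/*
	 * Build an IEEE-format WWN: the naming scheme (NAA) nibble goes in
	 * the top four bits, and for scheme 2 a 12-bit port number occupies
	 * bits 48-59 just above the 48-bit MAC.
	 */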
	wwn = host_mac | ((u64) scheme << 60);
	switch (scheme) {
	case 1:
		WARN_ON(port != 0);
		break;
	case 2:
		WARN_ON(port >= 0xfff);
		wwn |= (u64) port << 48;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return wwn;
}
EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);

/**
 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
 * @dev: ptr to the net_device the lport is bound to
 *
 * Returns: NULL or the located fcoe_softc
 */
static struct fcoe_softc *
fcoe_hostlist_lookup_softc(const struct net_device *dev)
{
	struct fcoe_softc *fc;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == dev) {
			read_unlock(&fcoe_hostlist_lock);
			return fc;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	return NULL;
}

/**
 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
 * @netdev: ptr to net_device
 *
 * Returns: NULL or the located fc_lport
 */
struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(netdev);

	return (fc) ? fc->lp : NULL;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);

/**
 * fcoe_hostlist_add() - Add a lport to lports list
 * @lp: ptr to the fc_lport to be added
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_add(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	if (!fc) {
		fc = lport_priv(lp);
		write_lock_bh(&fcoe_hostlist_lock);
		list_add_tail(&fc->list, &fcoe_hostlist);
		write_unlock_bh(&fcoe_hostlist_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);

/**
 * fcoe_hostlist_remove() - remove a lport from lports list
 * @lp: ptr to the fc_lport to be removed
 *
 * Returns: 0 for success
 */
int fcoe_hostlist_remove(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	BUG_ON(!fc);
	write_lock_bh(&fcoe_hostlist_lock);
	list_del(&fc->list);
	write_unlock_bh(&fcoe_hostlist_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);

/**
 * fcoe_libfc_config() - sets up libfc related properties for lport
 * @lp: ptr to the fc_lport
 * @tt: libfc function template
 *
 * Returns : 0 for success
 */
int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
{
	/* Set the function pointers set by the LLDD */
	memcpy(&lp->tt, tt, sizeof(*tt));
	if (fc_fcp_init(lp))
		return -ENOMEM;
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);

/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Initialization routine
 * 1. Will create fc transport software structure
 * 2. initialize the link list of port information structure
 *
 * Returns 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	int cpu;
	struct fcoe_percpu_s *p;

	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

#ifdef CONFIG_HOTPLUG_CPU
	register_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	/*
	 * initialize per CPU interrupt thread
	 */
	for_each_online_cpu(cpu) {
		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
		if (p) {
			p->thread = kthread_create(fcoe_percpu_receive_thread,
						   (void *)p,
						   "fcoethread/%d", cpu);

			/*
			 * if there is no error then bind the thread to the
			 * cpu, initialize the semaphore and skb queue head
			 */
			if (likely(!IS_ERR(p->thread))) {
				p->cpu = cpu;
				fcoe_percpu[cpu] = p;
				skb_queue_head_init(&p->fcoe_rx_list);
				kthread_bind(p->thread, cpu);
				wake_up_process(p->thread);
			} else {
				fcoe_percpu[cpu] = NULL;
				kfree(p);
			}
		}
	}

	/*
	 * setup link change notification
	 */
	fcoe_dev_setup();

	setup_timer(&fcoe_timer, fcoe_watchdog, 0);

	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	/* initialize the fcoe transport */
	fcoe_transport_init();

	fcoe_sw_init();

	return 0;
}
module_init(fcoe_init);

/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Returns: none
 */
static void __exit fcoe_exit(void)
{
	u32 idx;
	struct fcoe_softc *fc, *tmp;
	struct fcoe_percpu_s *p;
	struct sk_buff *skb;

	/*
	 * Stop all call back interfaces
	 */
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	fcoe_dev_cleanup();

	/*
	 * stop timer
	 */
	del_timer_sync(&fcoe_timer);

	/* releases the associated fcoe transport for each lport */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_transport_release(fc->real_dev);

	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			kthread_stop(fcoe_percpu[idx]->thread);
			p = fcoe_percpu[idx];
			spin_lock_bh(&p->fcoe_rx_list.lock);
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			if (fcoe_percpu[idx]->crc_eof_page)
				put_page(fcoe_percpu[idx]->crc_eof_page);
			kfree(fcoe_percpu[idx]);
		}
	}

	/* remove sw transport */
	fcoe_sw_exit();

	/* detach the transport */
	fcoe_transport_exit();
}
module_exit(fcoe_exit);