/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Target Discovery
 *
 * This block discovers all FC-4 remote ports, including FCP initiators. It
 * also handles RSCN events and re-discovery if necessary.
 */

/*
 * DISC LOCKING
 *
 * The disc mutex can be held when acquiring rport locks, but may not
 * be held when acquiring the lport lock. Refer to fc_lport.c for more
 * details.
 */
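
/*
 * A minimal lock-ordering sketch (illustration only): when more than one of
 * these locks is needed, they are taken in the order lport lock, then disc
 * mutex, then rport lock, per the rule above.
 *
 *	mutex_lock(&lport->lp_mutex);		outermost: lport lock
 *	mutex_lock(&lport->disc.disc_mutex);	then the disc mutex
 *	mutex_lock(&rdata->rp_mutex);		innermost: an rport lock
 */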

#include <linux/timer.h>
#include <linux/err.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>

#define FC_DISC_RETRY_LIMIT	3	/* max retries */
#define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */

static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport_priv *,
			      struct fc_rport_identifiers *);
static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);

/**
 * fc_disc_lookup_rport() - lookup a remote port by port_id
 * @lport: Fibre Channel host port instance
 * @port_id: remote port port_id to match
 */
struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport,
					   u32 port_id)
{
	const struct fc_disc *disc = &lport->disc;
	struct fc_rport_priv *rdata;

	list_for_each_entry(rdata, &disc->rports, peers) {
		if (rdata->ids.port_id == port_id &&
		    rdata->rp_state != RPORT_ST_DELETE)
			return rdata;
	}
	return NULL;
}

/**
 * fc_disc_stop_rports() - delete all the remote ports associated with the lport
 * @disc: The discovery job to stop rports on
 *
 * Locking Note: This function expects that the lport mutex is locked before
 * calling it.
 */
void fc_disc_stop_rports(struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata, *next;

	lport = disc->lport;

	mutex_lock(&disc->disc_mutex);
	list_for_each_entry_safe(rdata, next, &disc->rports, peers)
		lport->tt.rport_logoff(rdata);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
 */
static void fc_disc_rport_callback(struct fc_lport *lport,
				   struct fc_rport_priv *rdata,
				   enum fc_rport_event event)
{
	struct fc_disc *disc = &lport->disc;

	FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
		    rdata->ids.port_id);

	switch (event) {
	case RPORT_EV_READY:
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		mutex_lock(&disc->disc_mutex);
		list_del(&rdata->peers);
		mutex_unlock(&disc->disc_mutex);
		break;
	default:
		break;
	}
}

/**
 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
 * @sp: Current sequence of the RSCN exchange
 * @fp: RSCN Frame
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
				  struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	struct fc_els_rscn *rp;
	struct fc_els_rscn_page *pp;
	struct fc_seq_els_data rjt_data;
	unsigned int len;
	int redisc = 0;
	enum fc_els_rscn_ev_qual ev_qual;
	enum fc_els_rscn_addr_fmt fmt;
	LIST_HEAD(disc_ports);
	struct fc_disc_port *dp, *next;

	lport = disc->lport;

	FC_DISC_DBG(disc, "Received an RSCN event\n");

	/* make sure the frame contains an RSCN message */
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	if (!rp)
		goto reject;
	/* make sure the page length is as expected (4 bytes) */
	if (rp->rscn_page_len != sizeof(*pp))
		goto reject;
	/* get the RSCN payload length */
	len = ntohs(rp->rscn_plen);
	if (len < sizeof(*rp))
		goto reject;
	/* make sure the frame contains the expected payload */
	rp = fc_frame_payload_get(fp, len);
	if (!rp)
		goto reject;
	/* payload must be a multiple of the RSCN page size */
	len -= sizeof(*rp);
	if (len % sizeof(*pp))
		goto reject;
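
	/*
	 * Each RSCN page is a 4-byte entry: a flags byte carrying the event
	 * qualifier and address format, followed by the 24-bit affected
	 * address (port, area, domain, or fabric ID).
	 */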
	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
		fmt &= ELS_RSCN_ADDR_FMT_MASK;
		/*
		 * if we get an address format other than port
		 * (area, domain, fabric), then do a full discovery
		 */
		switch (fmt) {
		case ELS_ADDR_FMT_PORT:
			FC_DISC_DBG(disc, "Port address format for port "
				    "(%6x)\n", ntoh24(pp->rscn_fid));
			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
			if (!dp) {
				redisc = 1;
				break;
			}
			dp->lp = lport;
			dp->ids.port_id = ntoh24(pp->rscn_fid);
			dp->ids.port_name = -1;
			dp->ids.node_name = -1;
			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
			list_add_tail(&dp->peers, &disc_ports);
			break;
		case ELS_ADDR_FMT_AREA:
		case ELS_ADDR_FMT_DOM:
		case ELS_ADDR_FMT_FAB:
		default:
			FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
			redisc = 1;
			break;
		}
	}
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	if (redisc) {
		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
		fc_disc_restart(disc);
	} else {
		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
			    "redisc %d state %d in_prog %d\n",
			    redisc, lport->state, disc->pending);
		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
			list_del(&dp->peers);
			rdata = lport->tt.rport_lookup(lport, dp->ids.port_id);
			if (rdata)
				lport->tt.rport_logoff(rdata);
			fc_disc_single(disc, dp);
		}
	}
	fc_frame_free(fp);
	return;
reject:
	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_LOGIC;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_disc_recv_req() - Handle incoming requests
 * @sp: Current sequence of the request exchange
 * @fp: The frame
 * @lport: The FC local port
 *
 * Locking Note: This function is called from the EM and will lock
 *		 the disc_mutex before calling the handler for the
 *		 request.
 */
static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
			     struct fc_lport *lport)
{
	u8 op;
	struct fc_disc *disc = &lport->disc;

	op = fc_frame_payload_op(fp);
	switch (op) {
	case ELS_RSCN:
		mutex_lock(&disc->disc_mutex);
		fc_disc_recv_rscn_req(sp, fp, disc);
		mutex_unlock(&disc->disc_mutex);
		break;
	default:
		FC_DISC_DBG(disc, "Received an unsupported request, "
			    "the opcode is (%x)\n", op);
		break;
	}
}

/**
 * fc_disc_restart() - Restart discovery
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc mutex
 *		 is already locked.
 */
static void fc_disc_restart(struct fc_disc *disc)
{
	struct fc_rport_priv *rdata, *next;
	struct fc_lport *lport = disc->lport;

	FC_DISC_DBG(disc, "Restarting discovery\n");

	list_for_each_entry_safe(rdata, next, &disc->rports, peers)
		lport->tt.rport_logoff(rdata);

	disc->requested = 1;
	if (!disc->pending)
		fc_disc_gpn_ft_req(disc);
}

/**
 * fc_disc_start() - Fibre Channel Target discovery
 * @disc_callback: Callback routine called when discovery completes
 * @lport: FC local port
 *
 * If discovery is already pending, only the request flag is set; the
 * new request is serviced when the current discovery pass completes.
 */
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
						enum fc_disc_event),
			  struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;
	struct fc_disc *disc = &lport->disc;

	/*
	 * At this point we may have a new disc job or an existing
	 * one. Either way, let's lock when we make changes to it
	 * and send the GPN_FT request.
	 */
	mutex_lock(&disc->disc_mutex);
	disc->disc_callback = disc_callback;

	/*
	 * If not ready, or already running discovery, just set request flag.
	 */
	disc->requested = 1;

	if (disc->pending) {
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	/*
	 * Handle point-to-point mode as a simple discovery
	 * of the remote port. Yucky, yucky, yuck, yuck!
	 */
	rdata = disc->lport->ptp_rp;
	if (rdata) {
		kref_get(&rdata->kref);
		if (!fc_disc_new_target(disc, rdata, &rdata->ids))
			fc_disc_done(disc, DISC_EV_SUCCESS);
		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
	} else {
		fc_disc_gpn_ft_req(disc);	/* get ports by FC-4 type */
	}

	mutex_unlock(&disc->disc_mutex);
}

static struct fc_rport_operations fc_disc_rport_ops = {
	.event_callback = fc_disc_rport_callback,
};

/**
 * fc_disc_new_target() - Handle new target found by discovery
 * @disc: FC discovery context
 * @rdata: The previous FC remote port priv (NULL if new remote port)
 * @ids: Identifiers for the new FC remote port
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static int fc_disc_new_target(struct fc_disc *disc,
			      struct fc_rport_priv *rdata,
			      struct fc_rport_identifiers *ids)
{
	struct fc_lport *lport = disc->lport;
	int error = 0;

	if (rdata && ids->port_name) {
		if (rdata->ids.port_name == -1) {
			/*
			 * Set WWN and fall through to notify of create.
			 */
			rdata->ids.port_name = ids->port_name;
			rdata->ids.node_name = ids->node_name;
		} else if (rdata->ids.port_name != ids->port_name) {
			/*
			 * This is a new port with the same FCID as
			 * a previously-discovered port. Presumably the old
			 * port logged out and a new port logged in and was
			 * assigned the same FCID. This should be rare.
			 * Delete the old one and fall thru to re-create.
			 */
			lport->tt.rport_logoff(rdata);
			rdata = NULL;
		}
	}
	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
	    ids->port_id != fc_host_port_id(lport->host) &&
	    ids->port_name != lport->wwpn) {
		if (!rdata) {
			rdata = lport->tt.rport_lookup(lport, ids->port_id);
			if (!rdata) {
				rdata = lport->tt.rport_create(lport, ids);
				if (!rdata)
					error = -ENOMEM;
				else
					list_add_tail(&rdata->peers,
						      &disc->rports);
			}
		}
		if (rdata) {
			rdata->ops = &fc_disc_rport_ops;
			lport->tt.rport_login(rdata);
		}
	}
	return error;
}

/**
 * fc_disc_done() - Discovery has been completed
 * @disc: FC discovery context
 * @event: discovery completion status
 *
 * Locking Note: This function expects that the disc mutex is locked before
 * it is called. The discovery callback is then made with the lock released,
 * and the lock is re-taken before returning from this function
 */
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
	struct fc_lport *lport = disc->lport;

	FC_DISC_DBG(disc, "Discovery complete\n");

	if (disc->requested)
		fc_disc_gpn_ft_req(disc);
	else
		disc->pending = 0;

	mutex_unlock(&disc->disc_mutex);
	disc->disc_callback(lport, event);
	mutex_lock(&disc->disc_mutex);
}

/**
 * fc_disc_error() - Handle error on dNS request
 * @disc: FC discovery context
 * @fp: The frame pointer
 */
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
	struct fc_lport *lport = disc->lport;
	unsigned long delay = 0;

	FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
		    PTR_ERR(fp), disc->retry_count,
		    FC_DISC_RETRY_LIMIT);

	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure, or the exchange timed out,
		 * retry after delay.
		 */
		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
			/* go ahead and retry */
			if (!fp)
				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
			else {
				delay = msecs_to_jiffies(lport->e_d_tov);

				/* timeout faster first time */
				if (!disc->retry_count)
					delay /= 4;
			}
			disc->retry_count++;
			schedule_delayed_work(&disc->disc_work, delay);
		} else
			fc_disc_done(disc, DISC_EV_FAILED);
	}
}

/**
 * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
	struct fc_frame *fp;
	struct fc_lport *lport = disc->lport;

	WARN_ON(!fc_lport_test_ready(lport));

	disc->pending = 1;
	disc->requested = 0;

	disc->buf_len = 0;
	disc->seq_count = 0;
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_gid_ft));
	if (!fp)
		goto err;

	if (lport->tt.elsct_send(lport, 0, fp,
				 FC_NS_GPN_FT,
				 fc_disc_gpn_ft_resp,
				 disc, lport->e_d_tov))
		return;
err:
	fc_disc_error(disc, fp);
}

/**
 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
 * @disc: FC discovery context
 * @buf: GPN_FT response buffer
 * @len: size of response buffer
 *
 * Goes through the list of IDs and names resulting from a request.
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;
	char *bp;
	size_t plen;
	size_t tlen;
	int error = 0;
	struct fc_rport_identifiers ids;
	struct fc_rport_priv *rdata;

	lport = disc->lport;

	/*
	 * Handle partial name record left over from previous call.
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		np = &disc->partial_buf;
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but from the partial case above,
	 * bp, len describe the overall buffer, and np, plen describe the
	 * partial buffer, which would usually be full by now.
	 * After the first time through the loop, things return to "normal".
	 */
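	/*
	 * Each GPN_FT response entry is a fixed-size struct fc_gpn_ft_resp
	 * record (control/flags byte, 3-byte FC_ID, reserved bytes, then the
	 * 64-bit WWPN). Records may straddle frame boundaries, which is why
	 * a partial record is carried over in disc->partial_buf between calls.
	 */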
	while (plen >= sizeof(*np)) {
		ids.port_id = ntoh24(np->fp_fid);
		ids.port_name = ntohll(np->fp_wwpn);
		ids.node_name = -1;
		ids.roles = FC_RPORT_ROLE_UNKNOWN;

		if (ids.port_id != fc_host_port_id(lport->host) &&
		    ids.port_name != lport->wwpn) {
			rdata = lport->tt.rport_create(lport, &ids);
			if (rdata) {
				rdata->ops = &fc_disc_rport_ops;
				list_add_tail(&rdata->peers, &disc->rports);
				lport->tt.rport_login(rdata);
			} else
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
				       "(%6x)\n", ids.port_id);
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			fc_disc_done(disc, DISC_EV_SUCCESS);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DISC_DBG(disc, "Partial buffer remains "
				    "for discovery\n");
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	} else {
		disc->buf_len = 0;
	}
	return error;
}

/**
 * fc_disc_timeout() - Retry handler for the disc component
 * @work: Structure holding disc obj that needs retry discovery
 *
 * Handle retry of memory allocation for remote ports.
 */
static void fc_disc_timeout(struct work_struct *work)
{
	struct fc_disc *disc = container_of(work,
					    struct fc_disc,
					    disc_work.work);
	mutex_lock(&disc->disc_mutex);
	if (disc->requested && !disc->pending)
		fc_disc_gpn_ft_req(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
 * @sp: Current sequence of GPN_FT exchange
 * @fp: response frame
 * @disc_arg: FC discovery context
 *
 * Locking Note: This function is called without the disc mutex held, and
 *		 should do all of its processing with the mutex held.
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	unsigned int seq_cnt;
	void *buf = NULL;
	unsigned int len;
	int error;

	mutex_lock(&disc->disc_mutex);
	FC_DISC_DBG(disc, "Received a GPN_FT response\n");

	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
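
	/*
	 * The first frame of the response starts a new sequence (SOF_I3,
	 * sequence count 0) and carries the CT header; later frames of a
	 * multi-frame response (SOF_N3) carry only name records and are
	 * matched against the expected seq_count.
	 */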
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
	    disc->seq_count == 0) {
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
				    fr_len(fp));
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/* Accepted, parse the response. */
			buf = cp + 1;
			len -= sizeof(*cp);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
				    "(check zoning)\n", cp->ct_reason,
				    cp->ct_explan);
			fc_disc_done(disc, DISC_EV_FAILED);
		} else {
			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
				    "%x\n", ntohs(cp->ct_cmd));
		}
	} else if (fr_sof(fp) == FC_SOF_N3 &&
		   seq_cnt == disc->seq_count) {
		buf = fh + 1;
	} else {
		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
			    "seq_cnt %x expected %x sof %x eof %x\n",
			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
	}
	if (buf) {
		error = fc_disc_gpn_ft_parse(disc, buf, len);
		if (error)
			fc_disc_error(disc, fp);
		else
			disc->seq_count++;
	}
	fc_frame_free(fp);

	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_single() - Discover the directory information for a single target
 * @disc: FC discovery context
 * @dp: The port to rediscover
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;

	lport = disc->lport;

	if (dp->ids.port_id == fc_host_port_id(lport->host))
		goto out;

	rdata = lport->tt.rport_create(lport, &dp->ids);
	if (rdata) {
		rdata->ops = &fc_disc_rport_ops;
		list_add_tail(&rdata->peers, &disc->rports);
		lport->tt.rport_login(rdata);
	}
out:
	kfree(dp);	/* dp is temporary; free it whether or not the rport was created */
}

/**
 * fc_disc_stop() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 */
void fc_disc_stop(struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	if (disc) {
		cancel_delayed_work_sync(&disc->disc_work);
		fc_disc_stop_rports(disc);
	}
}

/**
 * fc_disc_stop_final() - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 *
 * This function will block until discovery has been
 * completely stopped and all rports have been deleted.
 */
void fc_disc_stop_final(struct fc_lport *lport)
{
	fc_disc_stop(lport);
	lport->tt.rport_flush_queue();
}

/**
 * fc_disc_init() - Initialize the discovery block
 * @lport: FC local port
 */
int fc_disc_init(struct fc_lport *lport)
{
	struct fc_disc *disc;

	if (!lport->tt.disc_start)
		lport->tt.disc_start = fc_disc_start;

	if (!lport->tt.disc_stop)
		lport->tt.disc_stop = fc_disc_stop;

	if (!lport->tt.disc_stop_final)
		lport->tt.disc_stop_final = fc_disc_stop_final;

	if (!lport->tt.disc_recv_req)
		lport->tt.disc_recv_req = fc_disc_recv_req;

	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_disc_lookup_rport;

	disc = &lport->disc;
	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
	mutex_init(&disc->disc_mutex);
	INIT_LIST_HEAD(&disc->rports);

	disc->lport = lport;

	return 0;
}
EXPORT_SYMBOL(fc_disc_init);
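
/*
 * Usage sketch (illustrative only): a low-level driver sets any tt.disc_*
 * handlers it wants to override before calling fc_disc_init(), which fills
 * in the remaining defaults from this file. "my_disc_start" below is a
 * hypothetical driver routine, not part of libfc.
 *
 *	lport->tt.disc_start = my_disc_start;	optional driver override
 *	fc_disc_init(lport);			defaults for everything else
 */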