fc_lport.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469
  1. /*
  2. * Copyright(c) 2007 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. * Maintained at www.Open-FCoE.org
  18. */
  19. /*
  20. * PORT LOCKING NOTES
  21. *
  22. * These comments only apply to the 'port code' which consists of the lport,
  23. * disc and rport blocks.
  24. *
  25. * MOTIVATION
  26. *
  27. * The lport, disc and rport blocks all have mutexes that are used to protect
  28. * those objects. The main motivation for these locks is to prevent from
  29. * having an lport reset just before we send a frame. In that scenario the
  30. * lport's FID would get set to zero and then we'd send a frame with an
  31. * invalid SID. We also need to ensure that states don't change unexpectedly
  32. * while processing another state.
  33. *
  34. * HIERARCHY
  35. *
  36. * The following hierarchy defines the locking rules. A greater lock
  37. * may be held before acquiring a lesser lock, but a lesser lock should never
  38. * be held while attempting to acquire a greater lock. Here is the hierarchy-
  39. *
  40. * lport > disc, lport > rport, disc > rport
  41. *
  42. * CALLBACKS
  43. *
  44. * The callbacks cause complications with this scheme. There is a callback
  45. * from the rport (to either lport or disc) and a callback from disc
  46. * (to the lport).
  47. *
  48. * As rports exit the rport state machine a callback is made to the owner of
  49. * the rport to notify success or failure. Since the callback is likely to
  50. * cause the lport or disc to grab its lock we cannot hold the rport lock
  51. * while making the callback. To ensure that the rport is not free'd while
  52. * processing the callback the rport callbacks are serialized through a
  53. * single-threaded workqueue. An rport would never be free'd while in a
  54. * callback handler because no other rport work in this queue can be executed
  55. * at the same time.
  56. *
  57. * When discovery succeeds or fails a callback is made to the lport as
  58. * notification. Currently, successful discovery causes the lport to take no
  59. * action. A failure will cause the lport to reset. There is likely a circular
  60. * locking problem with this implementation.
  61. */
  62. /*
  63. * LPORT LOCKING
  64. *
  65. * The critical sections protected by the lport's mutex are quite broad and
  66. * may be improved upon in the future. The lport code and its locking doesn't
  67. * influence the I/O path, so excessive locking doesn't penalize I/O
  68. * performance.
  69. *
  70. * The strategy is to lock whenever processing a request or response. Note
  71. * that every _enter_* function corresponds to a state change. They generally
  72. * change the lports state and then send a request out on the wire. We lock
  73. * before calling any of these functions to protect that state change. This
  74. * means that the entry points into the lport block manage the locks while
  75. * the state machine can transition between states (i.e. _enter_* functions)
  76. * while always staying protected.
  77. *
  78. * When handling responses we also hold the lport mutex broadly. When the
  79. * lport receives the response frame it locks the mutex and then calls the
  80. * appropriate handler for the particular response. Generally a response will
  81. * trigger a state change and so the lock must already be held.
  82. *
  83. * Retries also have to consider the locking. The retries occur from a work
  84. * context and the work function will lock the lport and then retry the state
  85. * (i.e. _enter_* function).
  86. */
  87. #include <linux/timer.h>
  88. #include <asm/unaligned.h>
  89. #include <scsi/fc/fc_gs.h>
  90. #include <scsi/libfc.h>
  91. #include <scsi/fc_encode.h>
  92. #include "fc_libfc.h"
  93. /* Fabric IDs to use for point-to-point mode, chosen on whims. */
  94. #define FC_LOCAL_PTP_FID_LO 0x010101
  95. #define FC_LOCAL_PTP_FID_HI 0x010102
  96. #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
  97. static void fc_lport_error(struct fc_lport *, struct fc_frame *);
  98. static void fc_lport_enter_reset(struct fc_lport *);
  99. static void fc_lport_enter_flogi(struct fc_lport *);
  100. static void fc_lport_enter_dns(struct fc_lport *);
  101. static void fc_lport_enter_rft_id(struct fc_lport *);
  102. static void fc_lport_enter_scr(struct fc_lport *);
  103. static void fc_lport_enter_ready(struct fc_lport *);
  104. static void fc_lport_enter_logo(struct fc_lport *);
  105. static const char *fc_lport_state_names[] = {
  106. [LPORT_ST_DISABLED] = "disabled",
  107. [LPORT_ST_FLOGI] = "FLOGI",
  108. [LPORT_ST_DNS] = "dNS",
  109. [LPORT_ST_RFT_ID] = "RFT_ID",
  110. [LPORT_ST_SCR] = "SCR",
  111. [LPORT_ST_READY] = "Ready",
  112. [LPORT_ST_LOGO] = "LOGO",
  113. [LPORT_ST_RESET] = "reset",
  114. };
/* Frame-send handler that silently discards every frame; installed as
 * lport->tt.frame_send during teardown (see fc_lport_destroy()) so that
 * late transmissions are dropped instead of reaching the hardware.
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}
/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Tracks the directory server (dNS) rport: a READY event while the
 * lport is in the DNS state records the rport and advances the state
 * machine to RFT_ID registration.
 *
 * Locking Note: The rport lock should not be held when calling
 * this function.
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			/* dNS login complete: remember the rport and
			 * register our FC-4 types with the name server. */
			lport->dns_rp = rdata;
			fc_lport_enter_rft_id(lport);
		} else {
			/* READY arrived in an unexpected state; log the
			 * rport back off rather than keeping a stale one. */
			FC_LPORT_DBG(lport, "Received an READY event "
				     "on port (%6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		/* NOTE(review): dns_rp is cleared without checking that
		 * @rdata actually is the dNS rport — confirm this callback
		 * can only fire for the directory server. */
		lport->dns_rp = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
  161. /**
  162. * fc_lport_state() - Return a string which represents the lport's state
  163. * @lport: The lport whose state is to converted to a string
  164. */
  165. static const char *fc_lport_state(struct fc_lport *lport)
  166. {
  167. const char *cp;
  168. cp = fc_lport_state_names[lport->state];
  169. if (!cp)
  170. cp = "unknown";
  171. return cp;
  172. }
  173. /**
  174. * fc_lport_ptp_setup() - Create an rport for point-to-point mode
  175. * @lport: The lport to attach the ptp rport to
  176. * @fid: The FID of the ptp rport
  177. * @remote_wwpn: The WWPN of the ptp rport
  178. * @remote_wwnn: The WWNN of the ptp rport
  179. */
  180. static void fc_lport_ptp_setup(struct fc_lport *lport,
  181. u32 remote_fid, u64 remote_wwpn,
  182. u64 remote_wwnn)
  183. {
  184. mutex_lock(&lport->disc.disc_mutex);
  185. if (lport->ptp_rp)
  186. lport->tt.rport_logoff(lport->ptp_rp);
  187. lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
  188. lport->ptp_rp->ids.port_name = remote_wwpn;
  189. lport->ptp_rp->ids.node_name = remote_wwnn;
  190. mutex_unlock(&lport->disc.disc_mutex);
  191. lport->tt.rport_login(lport->ptp_rp);
  192. fc_lport_enter_ready(lport);
  193. }
/**
 * fc_get_host_port_type() - Return the port type of the given Scsi_Host
 * @shost: The Scsi_Host whose port type is to be reported
 */
void fc_get_host_port_type(struct Scsi_Host *shost)
{
	/* TODO - currently just NPORT */
	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
}
EXPORT_SYMBOL(fc_get_host_port_type);
  200. void fc_get_host_port_state(struct Scsi_Host *shost)
  201. {
  202. struct fc_lport *lp = shost_priv(shost);
  203. mutex_lock(&lp->lp_mutex);
  204. if (!lp->link_up)
  205. fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
  206. else
  207. switch (lp->state) {
  208. case LPORT_ST_READY:
  209. fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
  210. break;
  211. default:
  212. fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
  213. }
  214. mutex_unlock(&lp->lp_mutex);
  215. }
  216. EXPORT_SYMBOL(fc_get_host_port_state);
/**
 * fc_get_host_speed() - Return the link speed of the given Scsi_Host
 * @shost: The Scsi_Host whose link speed is to be reported
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);
  223. struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
  224. {
  225. struct fc_host_statistics *fcoe_stats;
  226. struct fc_lport *lp = shost_priv(shost);
  227. struct timespec v0, v1;
  228. unsigned int cpu;
  229. fcoe_stats = &lp->host_stats;
  230. memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
  231. jiffies_to_timespec(jiffies, &v0);
  232. jiffies_to_timespec(lp->boot_time, &v1);
  233. fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
  234. for_each_possible_cpu(cpu) {
  235. struct fcoe_dev_stats *stats;
  236. stats = per_cpu_ptr(lp->dev_stats, cpu);
  237. fcoe_stats->tx_frames += stats->TxFrames;
  238. fcoe_stats->tx_words += stats->TxWords;
  239. fcoe_stats->rx_frames += stats->RxFrames;
  240. fcoe_stats->rx_words += stats->RxWords;
  241. fcoe_stats->error_frames += stats->ErrorFrames;
  242. fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
  243. fcoe_stats->fcp_input_requests += stats->InputRequests;
  244. fcoe_stats->fcp_output_requests += stats->OutputRequests;
  245. fcoe_stats->fcp_control_requests += stats->ControlRequests;
  246. fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
  247. fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
  248. fcoe_stats->link_failure_count += stats->LinkFailureCount;
  249. }
  250. fcoe_stats->lip_count = -1;
  251. fcoe_stats->nos_count = -1;
  252. fcoe_stats->loss_of_sync_count = -1;
  253. fcoe_stats->loss_of_signal_count = -1;
  254. fcoe_stats->prim_seq_protocol_err_count = -1;
  255. fcoe_stats->dumped_frames = -1;
  256. return fcoe_stats;
  257. }
  258. EXPORT_SYMBOL(fc_get_host_stats);
/*
 * Fill in FLOGI command for request.
 *
 * Builds the common service parameters and the class-3 service
 * parameter page.  For PLOGI (op != ELS_FLOGI) additional per-login
 * service parameters are filled in as well.
 */
static void
fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
		    unsigned int op)
{
	struct fc_els_csp *sp;	/* common service parameters */
	struct fc_els_cssp *cp;	/* class-specific service parameters */

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		/* PLOGI-only service parameters */
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}
  289. /*
  290. * Add a supported FC-4 type.
  291. */
  292. static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
  293. {
  294. __be32 *mp;
  295. mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
  296. *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
  297. }
/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @sp: current sequence in the RLIR exchange
 * @fp: RLIR request frame
 * @lport: Fibre Channel local port receiving the RLIR
 *
 * The report is simply accepted (LS_ACC) and the frame released.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
				   struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}
/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @sp: current sequence in the ECHO exchange
 * @in_fp: ECHO request frame
 * @lport: Fibre Channel local port receiving the ECHO
 *
 * Echoes the request payload back in an LS_ACC response, with the
 * first payload word replaced by the ELS accept code.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	unsigned int len;
	void *pp;	/* payload of the received request */
	void *dp;	/* payload of the response frame */
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	/* The response must hold at least the 4-byte ELS command word.
	 * NOTE(review): when the request payload is shorter than 4 bytes
	 * the memcpy below still copies the rounded-up length — confirm
	 * received frames are always padded so this cannot over-read. */
	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		/* Overwrite word 0 with the accept opcode. */
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		sp = lport->tt.seq_start_next(sp);
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);
	}
	fc_frame_free(in_fp);
}
/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @sp: The sequence in the RNID exchange
 * @in_fp: The RNID request frame
 * @lport: The local port receiving the RNID
 *
 * Replies with an LS_ACC carrying the common identification data
 * (WWPN/WWNN) and, when the general topology format is requested and
 * available, the general topology-specific data as well.  A malformed
 * request is rejected with LS_RJT.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		/* Payload too short for an RNID request - reject it. */
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		/* Only provide the general-topology page when it was
		 * requested and we have a valid association type. */
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			sp = lport->tt.seq_start_next(sp);
			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
				       FC_TYPE_ELS, f_ctl, 0);
			lport->tt.seq_send(lport, sp, fp);
		}
	}
	fc_frame_free(in_fp);
}
/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 * @lport: Fibre Channel local port receiving the LOGO
 *
 * Accepts the LOGO and resets the local port.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
				   struct fc_lport *lport)
{
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}
  432. /**
  433. * fc_fabric_login() - Start the lport state machine
  434. * @lport: The lport that should log into the fabric
  435. *
  436. * Locking Note: This function should not be called
  437. * with the lport lock held.
  438. */
  439. int fc_fabric_login(struct fc_lport *lport)
  440. {
  441. int rc = -1;
  442. mutex_lock(&lport->lp_mutex);
  443. if (lport->state == LPORT_ST_DISABLED) {
  444. fc_lport_enter_reset(lport);
  445. rc = 0;
  446. }
  447. mutex_unlock(&lport->lp_mutex);
  448. return rc;
  449. }
  450. EXPORT_SYMBOL(fc_fabric_login);
/**
 * __fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Records the link as up and, when the lport is in the RESET state,
 * starts fabric login by entering FLOGI.
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkup(struct fc_lport *lport)
{
	if (!lport->link_up) {
		lport->link_up = 1;

		if (lport->state == LPORT_ST_RESET)
			fc_lport_enter_flogi(lport);
	}
}
/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Logs the event, then performs the link-up handling under the
 * lport mutex.
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
	       fc_host_port_id(lport->host));

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);
  478. /**
  479. * __fc_linkdown() - Handler for transport linkdown events
  480. * @lport: The lport whose link is down
  481. *
  482. * Locking: must be called with the lp_mutex held
  483. */
  484. void __fc_linkdown(struct fc_lport *lport)
  485. {
  486. if (lport->link_up) {
  487. lport->link_up = 0;
  488. fc_lport_enter_reset(lport);
  489. lport->tt.fcp_cleanup(lport);
  490. }
  491. }
/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Logs the event, then performs the link-down handling under the
 * lport mutex.
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
	       fc_host_port_id(lport->host));

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);
/**
 * fc_fabric_logoff() - Logout of the fabric
 * @lport: fc_lport pointer to logoff the fabric
 *
 * Stops discovery, logs off the dNS rport, flushes pending rport work
 * and finally sends a fabric LOGO.  The lport mutex is dropped around
 * rport_flush_queue() because the rport callbacks being flushed take
 * this mutex themselves (see fc_lport_rport_callback()).
 *
 * Return value:
 *	0 for success, -1 for failure
 */
int fc_fabric_logoff(struct fc_lport *lport)
{
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rp)
		lport->tt.rport_logoff(lport->dns_rp);
	mutex_unlock(&lport->lp_mutex);
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);
/**
 * fc_lport_destroy() - unregister a fc_lport
 * @lport: fc_lport pointer to unregister
 *
 * Return value:
 *	always 0
 * Note:
 *	exit routine for fc_lport instance
 *	clean-up all the allocated memory
 *	and free up other system resources.
 *
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	/* Drop any future outgoing frames on the floor. */
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
  552. /**
  553. * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
  554. * @lport: fc_lport pointer to unregister
  555. * @mfs: the new mfs for fc_lport
  556. *
  557. * Set mfs for the given fc_lport to the new mfs.
  558. *
  559. * Return: 0 for success
  560. */
  561. int fc_set_mfs(struct fc_lport *lport, u32 mfs)
  562. {
  563. unsigned int old_mfs;
  564. int rc = -EINVAL;
  565. mutex_lock(&lport->lp_mutex);
  566. old_mfs = lport->mfs;
  567. if (mfs >= FC_MIN_MAX_FRAME) {
  568. mfs &= ~3;
  569. if (mfs > FC_MAX_FRAME)
  570. mfs = FC_MAX_FRAME;
  571. mfs -= sizeof(struct fc_frame_header);
  572. lport->mfs = mfs;
  573. rc = 0;
  574. }
  575. if (!rc && mfs < old_mfs)
  576. fc_lport_enter_reset(lport);
  577. mutex_unlock(&lport->lp_mutex);
  578. return rc;
  579. }
  580. EXPORT_SYMBOL(fc_set_mfs);
  581. /**
  582. * fc_lport_disc_callback() - Callback for discovery events
  583. * @lport: FC local port
  584. * @event: The discovery event
  585. */
  586. void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
  587. {
  588. switch (event) {
  589. case DISC_EV_SUCCESS:
  590. FC_LPORT_DBG(lport, "Discovery succeeded\n");
  591. break;
  592. case DISC_EV_FAILED:
  593. printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
  594. fc_host_port_id(lport->host));
  595. mutex_lock(&lport->lp_mutex);
  596. fc_lport_enter_reset(lport);
  597. mutex_unlock(&lport->lp_mutex);
  598. break;
  599. case DISC_EV_NONE:
  600. WARN_ON(1);
  601. break;
  602. }
  603. }
/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: Fibre Channel local port that is ready
 *
 * Moves the lport to READY, marks any associated vport active,
 * propagates the link change to vports, and starts discovery unless
 * this is a point-to-point link (ptp_rp set) where there is only one
 * peer and nothing to discover.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	if (!lport->ptp_rp)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}
/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @sp_in: The sequence the FLOGI is on
 * @rx_fp: The frame the FLOGI is in
 * @lport: The lport that received the request
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
				    struct fc_frame *rx_fp,
				    struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_seq *sp;
	struct fc_exch *ep;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	fh = fc_frame_header_get(rx_fp);
	remote_fid = ntoh24(fh->fh_s_id);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		/* A peer claiming our own WWPN is a misconfiguration. */
		printk(KERN_WARNING "libfc: Received FLOGI from port "
		       "with same WWPN %llx\n", remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		/* We have the higher WWPN: claim the HI FID and make sure
		 * the remote FID does not collide with ours. */
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_host_port_id(lport->host) = local_fid;

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response. If this fails, the originator should
		 * repeat the sequence.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

	} else {
		/* Allocation failed; fp is NULL here. */
		fc_lport_error(lport, fp);
	}
	/* Record the peer as our point-to-point rport and go READY. */
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));

out:
	sp = fr_seq(rx_fp);
	fc_frame_free(rx_fp);
}
/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The lport that received the request
 * @sp: The sequence the request is on
 * @fp: The frame the request is in
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 * lock held because it will grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here.  These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else if (fh->fh_type == FC_TYPE_ELS &&
		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
		/*
		 * Check opcode.  Anything not handled by the lport is
		 * passed on to the rport layer by default.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			fh = fc_frame_header_get(fp);
			/* The lport handles only fabric LOGOs, i.e. those
			 * sourced from the FLOGI well-known address. */
			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(sp, fp, lport);
	} else {
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
	mutex_unlock(&lport->lp_mutex);

	/*
	 * The common exch_done for all requests may not be good
	 * if any request requires a longer hold on the exchange. XXX
	 */
	lport->tt.exch_done(sp);
}
  767. /**
  768. * fc_lport_reset() - Reset an lport
  769. * @lport: The lport which should be reset
  770. *
  771. * Locking Note: This functions should not be called with the
  772. * lport lock held.
  773. */
int fc_lport_reset(struct fc_lport *lport)
{
	/*
	 * Cancel any pending retry before taking the lport lock so the
	 * timeout handler cannot race with the reset.
	 */
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;	/* always succeeds */
}
EXPORT_SYMBOL(fc_lport_reset);
  783. /**
  784. * fc_lport_reset_locked() - Reset the local port
  785. * @lport: Fibre Channel local port to be reset
  786. *
  787. * Locking Note: The lport lock is expected to be held before calling
  788. * this routine.
  789. */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	/* Log out of the name-server rport first, if one was created. */
	if (lport->dns_rp)
		lport->tt.rport_logoff(lport->dns_rp);

	/* Drop the point-to-point peer reference. */
	lport->ptp_rp = NULL;

	/* Stop discovery and reset all exchanges on this lport. */
	lport->tt.disc_stop(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);

	/* Clear fabric identity; it is re-learned on the next FLOGI. */
	fc_host_fabric_name(lport->host) = 0;
	fc_host_port_id(lport->host) = 0;
}
  800. /**
  801. * fc_lport_enter_reset() - Reset the local port
  802. * @lport: Fibre Channel local port to be reset
  803. *
  804. * Locking Note: The lport lock is expected to be held before calling
  805. * this routine.
  806. */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	/* Keep the FC transport-class vport state in sync with the link. */
	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	/* If the link is already up, restart fabric login immediately. */
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}
  823. /**
  824. * fc_lport_enter_disabled() - disable the local port
  825. * @lport: Fibre Channel local port to be reset
  826. *
  827. * Locking Note: The lport lock is expected to be held before calling
  828. * this routine.
  829. */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));
	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	/* Notify NPIV vports of the change, then tear everything down. */
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}
  838. /**
  839. * fc_lport_error() - Handler for any errors
  840. * @lport: The fc_lport object
  841. * @fp: The frame pointer
  842. *
  843. * If the error was caused by a resource allocation failure
  844. * then wait for half a second and retry, otherwise retry
  845. * after the e_d_tov time.
  846. */
  847. static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
  848. {
  849. unsigned long delay = 0;
  850. FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
  851. PTR_ERR(fp), fc_lport_state(lport),
  852. lport->retry_count);
  853. if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
  854. /*
  855. * Memory allocation failure, or the exchange timed out.
  856. * Retry after delay
  857. */
  858. if (lport->retry_count < lport->max_retry_count) {
  859. lport->retry_count++;
  860. if (!fp)
  861. delay = msecs_to_jiffies(500);
  862. else
  863. delay = msecs_to_jiffies(lport->e_d_tov);
  864. schedule_delayed_work(&lport->retry_work, delay);
  865. } else {
  866. switch (lport->state) {
  867. case LPORT_ST_DISABLED:
  868. case LPORT_ST_READY:
  869. case LPORT_ST_RESET:
  870. case LPORT_ST_RFT_ID:
  871. case LPORT_ST_SCR:
  872. case LPORT_ST_DNS:
  873. case LPORT_ST_FLOGI:
  874. case LPORT_ST_LOGO:
  875. fc_lport_enter_reset(lport);
  876. break;
  877. }
  878. }
  879. }
  880. }
  881. /**
  882. * fc_lport_rft_id_resp() - Handle response to Register Fibre
  883. * Channel Types by ID (RFT_ID) request
  884. * @sp: current sequence in RFT_ID exchange
  885. * @fp: response frame
  886. * @lp_arg: Fibre Channel host port instance
  887. *
  888. * Locking Note: This function will be called without the lport lock
  889. * held, but it will lock, call an _enter_* function or fc_lport_error
  890. * and then unlock the lport.
  891. */
static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));

	/* The exchange was closed out from under us; nothing to clean up. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	/* A stale response after a state change is logged and dropped. */
	if (lport->state != LPORT_ST_RFT_ID) {
		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* Only a CT accept from the directory server's NS subtype counts. */
	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		fc_lport_enter_scr(lport);
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);	/* only real frames reach this label */
err:
	mutex_unlock(&lport->lp_mutex);
}
  927. /**
  928. * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
  929. * @sp: current sequence in SCR exchange
  930. * @fp: response frame
  931. * @lp_arg: Fibre Channel lport port instance that sent the registration request
  932. *
  933. * Locking Note: This function will be called without the lport lock
  934. * held, but it will lock, call an _enter_* function or fc_lport_error
  935. * and then unlock the lport.
  936. */
  937. static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
  938. void *lp_arg)
  939. {
  940. struct fc_lport *lport = lp_arg;
  941. u8 op;
  942. FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
  943. if (fp == ERR_PTR(-FC_EX_CLOSED))
  944. return;
  945. mutex_lock(&lport->lp_mutex);
  946. if (lport->state != LPORT_ST_SCR) {
  947. FC_LPORT_DBG(lport, "Received a SCR response, but in state "
  948. "%s\n", fc_lport_state(lport));
  949. if (IS_ERR(fp))
  950. goto err;
  951. goto out;
  952. }
  953. if (IS_ERR(fp)) {
  954. fc_lport_error(lport, fp);
  955. goto err;
  956. }
  957. op = fc_frame_payload_op(fp);
  958. if (op == ELS_LS_ACC)
  959. fc_lport_enter_ready(lport);
  960. else
  961. fc_lport_error(lport, fp);
  962. out:
  963. fc_frame_free(fp);
  964. err:
  965. mutex_unlock(&lport->lp_mutex);
  966. }
  967. /**
  968. * fc_lport_enter_scr() - Send a State Change Register (SCR) request
  969. * @lport: Fibre Channel local port to register for state changes
  970. *
  971. * Locking Note: The lport lock is expected to be held before calling
  972. * this routine.
  973. */
  974. static void fc_lport_enter_scr(struct fc_lport *lport)
  975. {
  976. struct fc_frame *fp;
  977. FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
  978. fc_lport_state(lport));
  979. fc_lport_state_enter(lport, LPORT_ST_SCR);
  980. fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
  981. if (!fp) {
  982. fc_lport_error(lport, fp);
  983. return;
  984. }
  985. if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
  986. fc_lport_scr_resp, lport, lport->e_d_tov))
  987. fc_lport_error(lport, NULL);
  988. }
  989. /**
  990. * fc_lport_enter_rft_id() - Register FC4-types with the name server
  991. * @lport: Fibre Channel local port to register
  992. *
  993. * Locking Note: The lport lock is expected to be held before calling
  994. * this routine.
  995. */
  996. static void fc_lport_enter_rft_id(struct fc_lport *lport)
  997. {
  998. struct fc_frame *fp;
  999. struct fc_ns_fts *lps;
  1000. int i;
  1001. FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
  1002. fc_lport_state(lport));
  1003. fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
  1004. lps = &lport->fcts;
  1005. i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
  1006. while (--i >= 0)
  1007. if (ntohl(lps->ff_type_map[i]) != 0)
  1008. break;
  1009. if (i < 0) {
  1010. /* nothing to register, move on to SCR */
  1011. fc_lport_enter_scr(lport);
  1012. return;
  1013. }
  1014. fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
  1015. sizeof(struct fc_ns_rft));
  1016. if (!fp) {
  1017. fc_lport_error(lport, fp);
  1018. return;
  1019. }
  1020. if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
  1021. fc_lport_rft_id_resp,
  1022. lport, lport->e_d_tov))
  1023. fc_lport_error(lport, fp);
  1024. }
/* Callbacks installed on rports created by the lport (the dNS rport). */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};
  1028. /**
 * fc_lport_enter_dns() - Create a rport to the name server
  1030. * @lport: Fibre Channel local port requesting a rport for the name server
  1031. *
  1032. * Locking Note: The lport lock is expected to be held before calling
  1033. * this routine.
  1034. */
  1035. static void fc_lport_enter_dns(struct fc_lport *lport)
  1036. {
  1037. struct fc_rport_priv *rdata;
  1038. FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
  1039. fc_lport_state(lport));
  1040. fc_lport_state_enter(lport, LPORT_ST_DNS);
  1041. mutex_lock(&lport->disc.disc_mutex);
  1042. rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
  1043. mutex_unlock(&lport->disc.disc_mutex);
  1044. if (!rdata)
  1045. goto err;
  1046. rdata->ops = &fc_lport_rport_ops;
  1047. lport->tt.rport_login(rdata);
  1048. return;
  1049. err:
  1050. fc_lport_error(lport, NULL);
  1051. }
  1052. /**
  1053. * fc_lport_timeout() - Handler for the retry_work timer.
  1054. * @work: The work struct of the fc_lport
  1055. */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	/* Re-enter the current state to resend the request that timed out. */
	switch (lport->state) {
	case LPORT_ST_DISABLED:
		/* A retry should never be scheduled in this state. */
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		/* Nor in READY: nothing is outstanding once logged in. */
		WARN_ON(1);
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RFT_ID:
		fc_lport_enter_rft_id(lport);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}
  1089. /**
  1090. * fc_lport_logo_resp() - Handle response to LOGO request
  1091. * @sp: current sequence in LOGO exchange
  1092. * @fp: response frame
  1093. * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
  1094. *
  1095. * Locking Note: This function will be called without the lport lock
  1096. * held, but it will lock, call an _enter_* function or fc_lport_error
  1097. * and then unlock the lport.
  1098. */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* The exchange was closed out from under us; nothing to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	/* Ignore a stale response that arrives after a state change. */
	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* LS_ACC means fabric logout completed: disable the port. */
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);	/* only real frames reach this label */
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);
  1130. /**
 * fc_lport_enter_logo() - Logout of the fabric
  1132. * @lport: Fibre Channel local port to be logged out
  1133. *
  1134. * Locking Note: The lport lock is expected to be held before calling
  1135. * this routine.
  1136. */
  1137. static void fc_lport_enter_logo(struct fc_lport *lport)
  1138. {
  1139. struct fc_frame *fp;
  1140. struct fc_els_logo *logo;
  1141. FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
  1142. fc_lport_state(lport));
  1143. fc_lport_state_enter(lport, LPORT_ST_LOGO);
  1144. fc_vports_linkchange(lport);
  1145. fp = fc_frame_alloc(lport, sizeof(*logo));
  1146. if (!fp) {
  1147. fc_lport_error(lport, fp);
  1148. return;
  1149. }
  1150. if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
  1151. fc_lport_logo_resp, lport, lport->e_d_tov))
  1152. fc_lport_error(lport, NULL);
  1153. }
  1154. /**
  1155. * fc_lport_flogi_resp() - Handle response to FLOGI request
  1156. * @sp: current sequence in FLOGI exchange
  1157. * @fp: response frame
  1158. * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
  1159. *
  1160. * Locking Note: This function will be called without the lport lock
  1161. * held, but it will lock, call an _enter_* function or fc_lport_error
  1162. * and then unlock the lport.
  1163. */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	/* The exchange was closed out from under us; nothing to do. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	/* Drop a late response once the lport has left the FLOGI state. */
	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;	/* error pointers must not be freed */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	did = ntoh24(fh->fh_d_id);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
		       did);
		fc_host_port_id(lport->host) = did;
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* Clamp our max frame size to the peer's limit. */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/* EDTR flag: E_D_TOV is in ns, convert to ms. */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/*
				 * Peer is not an F_Port: it is another
				 * N_Port, so set up point-to-point mode.
				 */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				printk(KERN_INFO "libfc: Port (%6x) entered "
				       "point to point mode\n", did);
				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/*
				 * Fabric login accepted: record the fabric
				 * timers and name, then register with dNS.
				 */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);
  1237. /**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
  1239. * @lport: Fibre Channel local port to be logged in to the fabric
  1240. *
  1241. * Locking Note: The lport lock is expected to be held before calling
  1242. * this routine.
  1243. */
  1244. void fc_lport_enter_flogi(struct fc_lport *lport)
  1245. {
  1246. struct fc_frame *fp;
  1247. FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
  1248. fc_lport_state(lport));
  1249. fc_lport_state_enter(lport, LPORT_ST_FLOGI);
  1250. fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
  1251. if (!fp)
  1252. return fc_lport_error(lport, fp);
  1253. if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
  1254. lport->vport ? ELS_FDISC : ELS_FLOGI,
  1255. fc_lport_flogi_resp, lport, lport->e_d_tov))
  1256. fc_lport_error(lport, NULL);
  1257. }
/**
 * fc_lport_config() - Configure a local port before fc_lport_init()
 * @lport: The local port to configure
 *
 * Sets up the retry worker and lock, starts in the DISABLED state,
 * and registers the default FC-4 types (FCP and CT). Always returns 0.
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);
/**
 * fc_lport_init() - Initialize the lport layer for a local port
 * @lport: The local port to initialize
 *
 * Installs default template handlers if the LLD did not provide them,
 * and publishes the port's static attributes through the FC transport
 * class. Always returns 0.
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	/*
	 * NOTE(review): indices 2 and 7 of the FC-4 bitmap presumably
	 * correspond to the FCP and CT types registered in
	 * fc_lport_config() — confirm against the transport-class
	 * fc4s bitmap layout.
	 */
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;

	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;

	return 0;
}
EXPORT_SYMBOL(fc_lport_init);