/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");

unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

static struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)

/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)

struct fc_fcp_internal {
	mempool_t *scsi_pkt_pool;
	struct list_head scsi_pkt_queue;
	u8 throttled;
};

#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_TIME_OUT		10

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc() - allocation routine for scsi_pkt packet
 * @lp:	fc lport struct
 * @gfp: gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or NULL on allocation failure.
 * Context	: call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lp;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}

/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp: fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context	: call from process and interrupt context.
 *		  no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}
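
/*
 * Take an additional reference on the packet; the reference is dropped
 * with fc_fcp_pkt_release(), which frees the packet once the count
 * reaches zero.
 */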
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq: exchange sequence
 * @fsp: fcp packet struct
 *
 * Release the hold taken to keep the scsi_pkt around until the
 * EM layer exchange resource is freed.
 * Context	: called from EM layer.
 *		  no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp: fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}
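
/*
 * Release the packet lock and the reference taken by fc_fcp_lock_pkt().
 */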
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}
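
/*
 * Arm the per-packet timer unless the command has already completed.
 * The packet lock is normally held by the caller, since fsp->state is
 * tested here.
 */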
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}
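
/*
 * Ask the exchange manager to abort the outstanding exchange for this
 * command. Returns -EINVAL when there is no sequence to abort.
 */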
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = 0;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/*
 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
 * transfer for a read I/O indicated by the fc_fcp_pkt.
 * @fsp: ptr to the fc_fcp_pkt
 *
 * This is called in exch_seq_send() when we have a newly allocated
 * exchange with a valid exchange id to setup ddp.
 *
 * returns: none
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	lp = fsp->lp;
	if ((fsp->req_flags & FC_SRB_READ) &&
	    (lp->lro_enabled) && (lp->tt.ddp_setup)) {
		if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
				     scsi_sg_count(fsp->cmd)))
			fsp->xfer_ddp = xid;
	}
}

/*
 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
 * DDP related resources for this I/O if it is initialized
 * as a ddp transfer
 * @fsp: ptr to the fc_fcp_pkt
 *
 * returns: none
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;

	if (!fsp)
		return;

	if (fsp->xfer_ddp == FC_XID_UNKNOWN)
		return;

	lp = fsp->lp;
	if (lp->tt.ddp_done) {
		fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
		fsp->xfer_ddp = FC_XID_UNKNOWN;
	}
}

/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lp = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	size_t remaining;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	/* if this I/O is ddped, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
			   "data_len %x\n", len, offset, fsp->data_len);
		fc_fcp_retry_cmd(fsp);
		return;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	crc = 0;
	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));

	sg = scsi_sglist(sc);
	remaining = len;

	while (remaining > 0 && sg) {
		size_t off;
		void *page_addr;
		size_t sg_bytes;

		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		sg_bytes = min(remaining, sg->length - offset);

		/*
		 * The scatterlist item may be bigger than PAGE_SIZE,
		 * but we are limited to mapping PAGE_SIZE at a time.
		 */
		off = offset + sg->offset;
		sg_bytes = min(sg_bytes, (size_t)
			       (PAGE_SIZE - (off & ~PAGE_MASK)));
		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
					KM_SOFTIRQ0);
		if (!page_addr)
			break;		/* XXX panic? */

		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
			crc = crc32(crc, buf, sg_bytes);
		memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
		       sg_bytes);

		kunmap_atomic(page_addr, KM_SOFTIRQ0);
		buf += sg_bytes;
		offset += sg_bytes;
		remaining -= sg_bytes;
		copy_len += sg_bytes;
	}

	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
		buf = fc_frame_payload_get(fp, 0);
		if (len % 4) {
			crc = crc32(crc, buf + len, 4 - (len % 4));
			len += 4 - (len % 4);
		}

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = fc_lport_get_stats(lp);
			stats->ErrorFrames++;
			/* FIXME - per cpu count, not total count! */
			if (stats->InvalidCRCCount++ < 5)
				printk(KERN_WARNING "libfc: CRC error on data "
				       "frame for port (%6x)\n",
				       fc_host_port_id(lp->host));
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				fc_fcp_retry_cmd(fsp);
			return;
		}
	}

	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_send_data() - Send SCSI data to target.
 * @fsp: ptr to fc_fcp_pkt
 * @seq: ptr to this sequence
 * @offset: starting offset for this data request
 * @seq_blen: the burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down seq_blen
 * bytes of data in a single frame, otherwise send multiple FC
 * frames of the max FC frame payload supported by the target port.
 *
 * Returns : 0 for success.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
			    size_t offset, size_t seq_blen)
{
	struct fc_exch *ep;
	struct scsi_cmnd *sc;
	struct scatterlist *sg;
	struct fc_frame *fp = NULL;
	struct fc_lport *lp = fsp->lp;
	size_t remaining;
	size_t t_blen;
	size_t tlen;
	size_t sg_bytes;
	size_t frame_offset, fh_parm_offset;
	int error;
	void *data = NULL;
	void *page_addr;
	int using_sg = lp->sg_supp;
	u32 f_ctl;

	WARN_ON(seq_blen <= 0);
	if (unlikely(offset + seq_blen > fsp->data_len)) {
		/* this should never happen */
		FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
			   "offset %zx\n", seq_blen, offset);
		fc_fcp_send_abort(fsp);
		return 0;
	} else if (offset != fsp->xfer_len) {
		/* Out of Order Data Request - no problem, but unexpected. */
		FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
			   "seq_blen %zx offset %zx\n", seq_blen, offset);
	}

	/*
	 * if LLD is capable of seq_offload then set transport
	 * burst length (t_blen) to seq_blen, otherwise set t_blen
	 * to max FC frame payload previously set in fsp->max_payload.
	 */
	t_blen = fsp->max_payload;
	if (lp->seq_offload) {
		t_blen = min(seq_blen, (size_t)lp->lso_max);
		FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
			   fsp, seq_blen, lp->lso_max, t_blen);
	}

	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
	if (t_blen > 512)
		t_blen &= ~(512 - 1);	/* round down to block size */
	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
	sc = fsp->cmd;

	remaining = seq_blen;
	fh_parm_offset = frame_offset = offset;
	tlen = 0;
	seq = lp->tt.seq_start_next(seq);
	f_ctl = FC_FC_REL_OFF;
	WARN_ON(!seq);

	sg = scsi_sglist(sc);

	while (remaining > 0 && sg) {
		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		if (!fp) {
			tlen = min(t_blen, remaining);

			/*
			 * TODO.  Temporary workaround.  fc_seq_send() can't
			 * handle odd lengths in non-linear skbs.
			 * This will be the final fragment only.
			 */
			if (tlen % 4)
				using_sg = 0;
			if (using_sg) {
				fp = _fc_frame_alloc(lp, 0);
				if (!fp)
					return -ENOMEM;
			} else {
				fp = fc_frame_alloc(lp, tlen);
				if (!fp)
					return -ENOMEM;

				data = (void *)(fr_hdr(fp)) +
					sizeof(struct fc_frame_header);
			}
			fh_parm_offset = frame_offset;
			fr_max_payload(fp) = fsp->max_payload;
		}
		sg_bytes = min(tlen, sg->length - offset);
		if (using_sg) {
			get_page(sg_page(sg));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   sg_page(sg), sg->offset + offset,
					   sg_bytes);
			fp_skb(fp)->data_len += sg_bytes;
			fr_len(fp) += sg_bytes;
			fp_skb(fp)->truesize += PAGE_SIZE;
		} else {
			size_t off = offset + sg->offset;

			/*
			 * The scatterlist item may be bigger than PAGE_SIZE,
			 * but we must not cross pages inside the kmap.
			 */
			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
							   (off & ~PAGE_MASK)));
			page_addr = kmap_atomic(sg_page(sg) +
						(off >> PAGE_SHIFT),
						KM_SOFTIRQ0);
			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
			       sg_bytes);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			data += sg_bytes;
		}
		offset += sg_bytes;
		frame_offset += sg_bytes;
		tlen -= sg_bytes;
		remaining -= sg_bytes;

		if (tlen)
			continue;

		/*
		 * Send sequence with transfer sequence initiative in case
		 * this is last FCP frame of the sequence.
		 */
		if (remaining == 0)
			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

		ep = fc_seq_exch(seq);
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_parm_offset);

		/*
		 * send the fragment using the sequence.
		 */
		error = lp->tt.seq_send(lp, seq, fp);
		if (error) {
			WARN_ON(1);		/* send error should be rare */
			fc_fcp_retry_cmd(fsp);
			return 0;
		}
		fp = NULL;
	}
	fsp->xfer_len += seq_blen;	/* premature count? */
	return 0;
}
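
/*
 * Handle the BLS response to our ABTS. A BA_ACC, or a BA_RJT whose
 * reason is a logical error, means the abort is done; for anything else
 * we let the command timeout and scsi-ml recover.
 */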
static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int ba_done = 1;
	struct fc_ba_rjt *brp;
	struct fc_frame_header *fh;

	fh = fc_frame_header_get(fp);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		break;
	case FC_RCTL_BA_RJT:
		brp = fc_frame_payload_get(fp, sizeof(*brp));
		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
			break;
		/* fall thru */
	default:
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore clear the ba_done flag.
		 */
		ba_done = 0;
	}

	if (ba_done) {
		fsp->state |= FC_SRB_ABORTED;
		fsp->state &= ~FC_SRB_ABORT_PENDING;

		if (fsp->wait_for_comp)
			complete(&fsp->tm_done);
		else
			fc_fcp_complete_locked(fsp);
	}
}

/**
 * fc_fcp_reduce_can_queue() - drop can_queue
 * @lp: lport to drop queueing for
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * entirely on reserved structs.
 */
static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	unsigned long flags;
	int can_queue;

	spin_lock_irqsave(lp->host->host_lock, flags);
	if (si->throttled)
		goto done;
	si->throttled = 1;

	can_queue = lp->host->can_queue;
	can_queue >>= 1;
	if (!can_queue)
		can_queue = 1;
	lp->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
		     "Reducing can_queue to %d.\n", can_queue);
done:
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}

/**
 * fc_fcp_recv() - Receive FCP frames
 * @seq: The sequence the frame is on
 * @fp: The FC frame
 * @arg: The related FCP packet
 *
 * Return   : None
 * Context  : called from Soft IRQ context
 *	      must not be called while holding the list lock
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_lport *lport = fsp->lp;
	struct fc_frame_header *fh;
	struct fcp_txrdy *dd;
	u8 r_ctl;
	int rc = 0;

	if (IS_ERR(fp))
		goto errout;

	fh = fc_frame_header_get(fp);
	r_ctl = fh->fh_r_ctl;

	if (!(lport->state & LPORT_ST_READY))
		goto out;
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->last_pkt_time = jiffies;

	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_abts_resp(fsp, fp);
		goto unlock;
	}

	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
		goto unlock;

	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
		/*
		 * received XFER RDY from the target
		 * need to send data to the target
		 */
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
		dd = fc_frame_payload_get(fp, sizeof(*dd));
		WARN_ON(!dd);

		rc = fc_fcp_send_data(fsp, seq,
				      (size_t) ntohl(dd->ft_data_ro),
				      (size_t) ntohl(dd->ft_burst_len));
		if (!rc)
			seq->rec_data = fsp->xfer_len;
		else if (rc == -ENOMEM)
			fsp->state |= FC_SRB_NOMEM;
	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
		/*
		 * received a DATA frame
		 * next we will copy the data to the system buffer
		 */
		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
		fc_fcp_recv_data(fsp, fp);
		seq->rec_data = fsp->xfer_contig_end;
	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

		fc_fcp_resp(fsp, fp);
	} else {
		FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
	}
unlock:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_frame_free(fp);
errout:
	if (IS_ERR(fp))
		fc_fcp_error(fsp, fp);
	else if (rc == -ENOMEM)
		fc_fcp_reduce_can_queue(lport);
}
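
/*
 * Handle an FCP RSP frame: record SCSI status, response-info and sense
 * data, adjust the expected length on underrun/overrun, and complete
 * the command unless more data may still be queued locally.
 */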
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fcp_resp *fc_rp;
	struct fcp_resp_ext *rp_ex;
	struct fcp_resp_rsp_info *fc_rp_info;
	u32 plen;
	u32 expected_len;
	u32 respl = 0;
	u32 snsl = 0;
	u8 flags = 0;

	plen = fr_len(fp);
	fh = (struct fc_frame_header *)fr_hdr(fp);
	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
		goto len_err;
	plen -= sizeof(*fh);
	fc_rp = (struct fcp_resp *)(fh + 1);
	fsp->cdb_status = fc_rp->fr_status;
	flags = fc_rp->fr_flags;
	fsp->scsi_comp_flags = flags;
	expected_len = fsp->data_len;

	/* if ddp, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
		rp_ex = (void *)(fc_rp + 1);
		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
				goto len_err;
			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
			if (flags & FCP_RSP_LEN_VAL) {
				respl = ntohl(rp_ex->fr_rsp_len);
				if (respl != sizeof(*fc_rp_info))
					goto len_err;
				if (fsp->wait_for_comp) {
					/* Abuse cdb_status for rsp code */
					fsp->cdb_status = fc_rp_info->rsp_code;
					complete(&fsp->tm_done);
					/*
					 * tmfs will not have any scsi cmd so
					 * exit here
					 */
					return;
				} else
					goto err;
			}
			if (flags & FCP_SNS_LEN_VAL) {
				snsl = ntohl(rp_ex->fr_sns_len);
				if (snsl > SCSI_SENSE_BUFFERSIZE)
					snsl = SCSI_SENSE_BUFFERSIZE;
				memcpy(fsp->cmd->sense_buffer,
				       (char *)fc_rp_info + respl, snsl);
			}
		}
		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
				goto len_err;
			if (flags & FCP_RESID_UNDER) {
				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
				/*
				 * The cmnd->underflow is the minimum number of
				 * bytes that must be transferred for this
				 * command.  Provided a sense condition is not
				 * present, make sure the actual amount
				 * transferred is at least the underflow value
				 * or fail.
				 */
				if (!(flags & FCP_SNS_LEN_VAL) &&
				    (fc_rp->fr_status == 0) &&
				    (scsi_bufflen(fsp->cmd) -
				     fsp->scsi_resid) < fsp->cmd->underflow)
					goto err;
				expected_len -= fsp->scsi_resid;
			} else {
				fsp->status_code = FC_ERROR;
			}
		}
	}
	fsp->state |= FC_SRB_RCV_STATUS;

	/*
	 * Check for missing or extra data frames.
	 */
	if (unlikely(fsp->xfer_len != expected_len)) {
		if (fsp->xfer_len < expected_len) {
			/*
			 * Some data may be queued locally,
			 * Wait at least one jiffy to see if it is delivered.
			 * If this expires without data, we may do SRR.
			 */
			fc_fcp_timer_set(fsp, 2);
			return;
		}
		fsp->status_code = FC_DATA_OVRRUN;
		FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
			   "len %x, data len %x\n",
			   fsp->rport->port_id,
			   fsp->xfer_len, expected_len, fsp->data_len);
	}
	fc_fcp_complete_locked(fsp);
	return;

len_err:
	FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
		   "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_complete_locked() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_seq *seq;
	struct fc_exch *ep;
	u32 f_ctl;

	if (fsp->state & FC_SRB_ABORT_PENDING)
		return;

	if (fsp->state & FC_SRB_ABORTED) {
		if (!fsp->status_code)
			fsp->status_code = FC_CMD_ABORTED;
	} else {
		/*
		 * Test for transport underrun, independent of response
		 * underrun status.
		 */
		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
			fsp->status_code = FC_DATA_UNDRUN;
			fsp->io_status = 0;
		}
	}

	seq = fsp->seq_ptr;
	if (seq) {
		fsp->seq_ptr = NULL;
		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
			struct fc_frame *conf_frame;
			struct fc_seq *csp;

			csp = lp->tt.seq_start_next(seq);
			conf_frame = fc_frame_alloc(fsp->lp, 0);
			if (conf_frame) {
				f_ctl = FC_FC_SEQ_INIT;
				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
				ep = fc_seq_exch(seq);
				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
					       ep->did, ep->sid,
					       FC_TYPE_FCP, f_ctl, 0);
				lp->tt.seq_send(lp, csp, conf_frame);
			}
		}
		lp->tt.exch_done(seq);
	}
	fc_io_compl(fsp);
}
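
/*
 * Release the exchange, if any, and record @error as the command's
 * status code; completing the command is left to the caller.
 */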
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
	struct fc_lport *lp = fsp->lp;

	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->status_code = error;
}

/**
 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
 * @lp: logical port
 * @id: target id
 * @lun: lun
 * @error: fsp status code
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
				    unsigned int lun, int error)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	struct fc_fcp_pkt *fsp;
	struct scsi_cmnd *sc_cmd;
	unsigned long flags;

	spin_lock_irqsave(lp->host->host_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		sc_cmd = fsp->cmd;
		if (id != -1 && scmd_id(sc_cmd) != id)
			continue;

		if (lun != -1 && sc_cmd->device->lun != lun)
			continue;

		fc_fcp_pkt_hold(fsp);
		spin_unlock_irqrestore(lp->host->host_lock, flags);

		if (!fc_fcp_lock_pkt(fsp)) {
			fc_fcp_cleanup_cmd(fsp, error);
			fc_io_compl(fsp);
			fc_fcp_unlock_pkt(fsp);
		}

		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(lp->host->host_lock, flags);
		/*
		 * while we dropped the lock multiple pkts could
		 * have been released, so we have to start over.
		 */
		goto restart;
	}
	spin_unlock_irqrestore(lp->host->host_lock, flags);
}
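
/*
 * Fail all outstanding commands on the lport with FC_HRD_ERROR.
 */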
static void fc_fcp_abort_io(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
}

/**
 * fc_fcp_pkt_send() - send an FCP packet to the lower level.
 * @lp: fc lport
 * @fsp: fc packet.
 *
 * This is called by upper layer protocol.
 * Return   : zero for success and -1 for failure
 * Context  : called from queuecommand which can be called from process
 *	      or scsi soft irq.
 * Locks    : called with the host lock and irqs disabled.
 */
static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
	int rc;

	fsp->cmd->SCp.ptr = (char *)fsp;
	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

	int_to_scsilun(fsp->cmd->device->lun,
		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);

	spin_unlock_irq(lp->host->host_lock);
	rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
	spin_lock_irq(lp->host->host_lock);
	if (rc)
		list_del(&fsp->list);

	return rc;
}
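
/*
 * Build the FCP_CMND frame, send it on a new exchange with @resp as the
 * response handler, and arm the command timer. Returns 0 on success (or
 * when the packet has already completed) and -1 when the frame or the
 * exchange could not be allocated.
 */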
static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			   void (*resp)(struct fc_seq *,
					struct fc_frame *fp,
					void *arg))
{
	struct fc_frame *fp;
	struct fc_seq *seq;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	const size_t len = sizeof(fsp->cdb_cmd);
	int rc = 0;

	if (fc_fcp_lock_pkt(fsp))
		return 0;

	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
	if (!fp) {
		rc = -1;
		goto unlock;
	}

	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
	fr_fsp(fp) = fsp;
	rport = fsp->rport;
	fsp->max_payload = rport->maxframe_size;
	rp = rport->dd_data;

	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
	if (!seq) {
		fc_frame_free(fp);
		rc = -1;
		goto unlock;
	}
	fsp->last_pkt_time = jiffies;
	fsp->seq_ptr = seq;
	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */

	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
	fc_fcp_timer_set(fsp,
			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
	fc_fcp_unlock_pkt(fsp);
	return rc;
}

/*
 * transport error handler
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (error == -FC_EX_CLOSED) {
		fc_fcp_retry_cmd(fsp);
		goto unlock;
	}

	/*
	 * clear abort pending, because the lower layer
	 * decided to force completion.
	 */
	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_CMD_PLOGO;
	fc_fcp_complete_locked(fsp);
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/*
 * Scsi abort handler - sends an abort
 * and then waits for the abort completion.
 */
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
	int rc = FAILED;

	if (fc_fcp_send_abort(fsp))
		return FAILED;

	init_completion(&fsp->tm_done);
	fsp->wait_for_comp = 1;

	spin_unlock_bh(&fsp->scsi_pkt_lock);
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->wait_for_comp = 0;

	if (!rc) {
		FC_FCP_DBG(fsp, "target abort cmd failed\n");
		rc = FAILED;
	} else if (fsp->state & FC_SRB_ABORTED) {
		FC_FCP_DBG(fsp, "target abort cmd passed\n");
		rc = SUCCESS;
		fc_fcp_complete_locked(fsp);
	}

	return rc;
}

/*
 * Retry LUN reset after resource allocation failed.
 */
static void fc_lun_reset_send(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_lport *lp = fsp->lp;

	if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
			return;
		if (fc_fcp_lock_pkt(fsp))
			return;
		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		fc_fcp_unlock_pkt(fsp);
	}
}

/*
 * Scsi device reset handler - send a LUN RESET to the device
 * and wait for the reset reply.
 */
static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
			unsigned int id, unsigned int lun)
{
	int rc;

	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

	fsp->wait_for_comp = 1;
	init_completion(&fsp->tm_done);

	fc_lun_reset_send((unsigned long)fsp);

	/*
	 * wait for completion of reset
	 * after that make sure all commands are terminated
	 */
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->state |= FC_SRB_COMPL;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	del_timer_sync(&fsp->timer);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->seq_ptr) {
		lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->wait_for_comp = 0;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	if (!rc) {
		FC_SCSI_DBG(lp, "lun reset failed\n");
		return FAILED;
	}

	/* cdb_status holds the tmf's rsp code */
	if (fsp->cdb_status != FCP_TMF_CMPL)
		return FAILED;

	FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
	return SUCCESS;
}

/*
 * Task Management response handler
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/*
		 * If there is an error just let it timeout or wait
		 * for the TMF to be aborted if it timed out.
		 *
		 * scsi-eh will escalate when either happens.
		 */
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		return;

	/*
	 * raced with eh timeout handler.
	 */
	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return;
	}

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_BLS)
		fc_fcp_resp(fsp, fp);
	fsp->seq_ptr = NULL;
	fsp->lp->tt.exch_done(seq);
	fc_frame_free(fp);
	fc_fcp_unlock_pkt(fsp);
}
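
/*
 * Terminate all outstanding commands on the lport with FC_ERROR.
 */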
static void fc_fcp_cleanup(struct fc_lport *lp)
{
	fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
}

/*
 * fc_fcp_timeout: called by OS timer function.
 *
 * The timer has been deactivated and must be reactivated if desired
 * using fc_fcp_timer_set().
 *
 * Algorithm:
 *
 * If REC is supported, just issue it, and return.  The REC exchange will
 * complete or time out, and recovery can continue at that point.
 *
 * Otherwise, if the response has been received without all the data,
 * it has been ER_TIMEOUT since the response was received.
 *
 * If the response has not been received,
 * we see if data was received recently.  If it has been, we continue waiting,
 * otherwise, we abort the command.
 */
static void fc_fcp_timeout(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_rport *rport = fsp->rport;
	struct fc_rport_libfc_priv *rp = rport->dd_data;

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (fsp->cdb_cmd.fc_tm_flags)
		goto unlock;

	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
		fc_fcp_rec(fsp);
	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
			       jiffies))
		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
	else if (fsp->state & FC_SRB_RCV_STATUS)
		fc_fcp_complete_locked(fsp);
	else
		fc_timeout_error(fsp);
	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/*
 * Send a REC ELS request
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lp;
	struct fc_frame *fp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;

	lp = fsp->lp;
	rport = fsp->rport;
	rp = rport->dd_data;
	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
		fsp->status_code = FC_HRD_ERROR;
		fsp->io_status = 0;
		fc_fcp_complete_locked(fsp);
		return;
	}
	fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
	if (!fp)
		goto retry;

	fr_seq(fp) = fsp->seq_ptr;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
		return;
	}
	fc_frame_free(fp);
retry:
	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
	else
		fc_timeout_error(fsp);
}

/*
 * Receive handler for REC ELS frame.
 * If it is a reject then let the scsi layer handle
 * the timeout.  If it is a LS_ACC then if the io was not completed
 * then set the timeout and return, otherwise complete the exchange
 * and tell the scsi layer to restart the I/O.
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_els_rec_acc *recp;
	struct fc_els_ls_rjt *rjt;
	u32 e_stat;
	u8 opcode;
	u32 offset;
	enum dma_data_direction data_dir;
	enum fc_rctl r_ctl;
	struct fc_rport_libfc_priv *rp;

	if (IS_ERR(fp)) {
		fc_fcp_rec_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->recov_retry = 0;
	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		switch (rjt->er_reason) {
		default:
			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
				   "reason %d expl %d\n",
				   fsp->rport->port_id, rjt->er_reason,
				   rjt->er_explan);
			/* fall through */
		case ELS_RJT_UNSUP:
			FC_FCP_DBG(fsp, "device does not support REC\n");
			rp = fsp->rport->dd_data;
			/*
			 * if we do not support RECs or got some bogus
			 * reason then reset the timer so we check for
			 * making progress.
			 */
			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
			break;
		case ELS_RJT_LOGIC:
		case ELS_RJT_UNAB:
			/*
			 * If no data transfer, the command frame got dropped
			 * so we just retry.  If data was transferred, we
			 * lost the response but the target has no record,
			 * so we abort and retry.
			 */
			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
			    fsp->xfer_len == 0) {
				fc_fcp_retry_cmd(fsp);
				break;
			}
			fc_timeout_error(fsp);
			break;
		}
	} else if (opcode == ELS_LS_ACC) {
		if (fsp->state & FC_SRB_ABORTED)
			goto unlock_out;

		data_dir = fsp->cmd->sc_data_direction;
		recp = fc_frame_payload_get(fp, sizeof(*recp));
		offset = ntohl(recp->reca_fc4value);
		e_stat = ntohl(recp->reca_e_stat);

		if (e_stat & ESB_ST_COMPLETE) {
			/*
			 * The exchange is complete.
			 *
			 * For output, we must've lost the response.
			 * For input, all data must've been sent.
			 * We may have lost the response
			 * (and a confirmation was requested) and maybe
			 * some data.
			 *
			 * If all data received, send SRR
			 * asking for response.  If partial data received,
			 * or gaps, SRR requests data at start of gap.
			 * Recovery via SRR relies on in-order-delivery.
			 */
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end == offset) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else {
				offset = fsp->xfer_contig_end;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		} else if (e_stat & ESB_ST_SEQ_INIT) {
			/*
			 * The remote port has the initiative, so just
			 * keep waiting for it to complete.
			 */
			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		} else {
			/*
			 * The exchange is incomplete, we have seq. initiative.
			 * Lost response with requested confirmation,
			 * lost confirmation, lost transfer ready or
			 * lost write data.
			 *
			 * For output, if not all data was received, ask
			 * for transfer ready to be repeated.
			 *
			 * If we received or sent all the data, send SRR to
			 * request response.
			 *
			 * If we lost a response, we may have lost some read
			 * data as well.
			 */
			r_ctl = FC_RCTL_DD_SOL_DATA;
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				if (offset < fsp->data_len)
					r_ctl = FC_RCTL_DD_DATA_DESC;
			} else if (offset == fsp->xfer_contig_end) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end < offset) {
				offset = fsp->xfer_contig_end;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		}
	}
unlock_out:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
	fc_frame_free(fp);
}

/*
 * Handle error response or timeout for REC exchange.
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		break;

	default:
		FC_FCP_DBG(fsp, "REC %p fid %x unexpected error %d\n",
			   fsp, fsp->rport->port_id, error);
		fsp->status_code = FC_CMD_PLOGO;
		/* fall through */

	case -FC_EX_TIMEOUT:
		/*
		 * Assume REC or LS_ACC was lost.
		 * The exchange manager will have aborted REC, so retry.
		 */
		FC_FCP_DBG(fsp, "REC fid %x error %d retry %d/%d\n",
			   fsp->rport->port_id, error, fsp->recov_retry,
			   FC_MAX_RECOV_RETRY);
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
}

/*
 * Time out error routine:
 * aborts the I/O, closes the exchange and
 * sends completion notification to the scsi layer.
 */
static void fc_timeout_error(struct fc_fcp_pkt *fsp)
{
	fsp->status_code = FC_CMD_TIME_OUT;
	fsp->cdb_status = 0;
	fsp->io_status = 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
	fc_fcp_send_abort(fsp);
}

/*
 * Sequence retransmission request.
 * This is called after receiving status but insufficient data, or
 * when expecting status but the request has timed out.
 */
static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
{
	struct fc_lport *lp = fsp->lp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
	struct fc_seq *seq;
	struct fcp_srr *srr;
	struct fc_frame *fp;
	u8 cdb_op;

	rport = fsp->rport;
	rp = rport->dd_data;
	cdb_op = fsp->cdb_cmd.fc_cdb[0];

	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
		goto retry;			/* shouldn't happen */
	fp = fc_frame_alloc(lp, sizeof(*srr));
	if (!fp)
		goto retry;

	srr = fc_frame_payload_get(fp, sizeof(*srr));
	memset(srr, 0, sizeof(*srr));
	srr->srr_op = ELS_SRR;
	srr->srr_ox_id = htons(ep->oxid);
	srr->srr_rx_id = htons(ep->rxid);
	srr->srr_r_ctl = r_ctl;
	srr->srr_rel_off = htonl(offset);

	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
				   fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
	if (!seq) {
		fc_frame_free(fp);
		goto retry;
	}
	fsp->recov_seq = seq;
	fsp->xfer_len = offset;
	fsp->xfer_contig_end = offset;
	fsp->state &= ~FC_SRB_RCV_STATUS;
	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
	return;
retry:
	fc_fcp_retry_cmd(fsp);
}

/*
 * Handle response from SRR.
 */
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		fc_fcp_srr_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fh = fc_frame_header_get(fp);
	/*
	 * BUG? fc_fcp_srr_error calls exch_done which would release
	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
	 * then fc_exch_timeout would be sending an abort. The exch_done
	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
	 * an abort response though.
	 */
	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_unlock_pkt(fsp);
		return;
	}

	fsp->recov_seq = NULL;
	switch (fc_frame_payload_op(fp)) {
	case ELS_LS_ACC:
		fsp->recov_retry = 0;
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		break;
	case ELS_LS_RJT:
	default:
		fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
	fsp->lp->tt.exch_done(seq);
out:
	fc_frame_free(fp);
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
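
/*
 * Handle errors in response to an outstanding SRR.
 * A timeout is retried via REC, up to FC_MAX_RECOV_RETRY attempts;
 * a closed exchange or any other error retries the command itself.
 */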
static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->lp->tt.exch_done(fsp->recov_seq);
	fsp->recov_seq = NULL;

	switch (PTR_ERR(fp)) {
	case -FC_EX_TIMEOUT:
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	case -FC_EX_CLOSED:			/* e.g., link failure */
		/* fall through */
	default:
		fc_fcp_retry_cmd(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
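
/*
 * Return non-zero if the lport can accept new I/O: it must be in the
 * ready state with the link up and its queue not full.
 */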
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
	/* lock ? */
	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}

/**
 * fc_queuecommand - The queuecommand function of the scsi template
 * @sc_cmd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when sc_cmd is completed
 *
 * This is the I/O strategy routine, called by the scsi layer.
 * It is called with the host_lock held.
 */
int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_fcp_pkt *fsp;
	struct fc_rport_libfc_priv *rp;
	int rval;
	int rc = 0;
	struct fcoe_dev_stats *stats;

	lp = shost_priv(sc_cmd->device->host);

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		done(sc_cmd);
		goto out;
	}

	if (!*(struct fc_remote_port **)rport->dd_data) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
		sc_cmd->result = DID_IMM_RETRY << 16;
		done(sc_cmd);
		goto out;
	}

	rp = rport->dd_data;

	if (!fc_fcp_lport_queue_ready(lp)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
	if (fsp == NULL) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * build the libfc request pkt
	 */
	fsp->cmd = sc_cmd;	/* save the cmd */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */
	fsp->xfer_ddp = FC_XID_UNKNOWN;
	sc_cmd->scsi_done = done;

	/*
	 * set up the transfer length
	 */
	fsp->data_len = scsi_bufflen(sc_cmd);
	fsp->xfer_len = 0;

	/*
	 * setup the data direction
	 */
	stats = fc_lport_get_stats(lp);
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		fsp->req_flags = FC_SRB_READ;
		stats->InputRequests++;
		stats->InputMegabytes += fsp->data_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		fsp->req_flags = FC_SRB_WRITE;
		stats->OutputRequests++;
		stats->OutputMegabytes += fsp->data_len;
	} else {
		fsp->req_flags = 0;
		stats->ControlRequests++;
	}

	fsp->tgt_flags = rp->flags;

	init_timer(&fsp->timer);
	fsp->timer.data = (unsigned long)fsp;

	/*
	 * Send it to the lower layer. On a non-zero return, release
	 * the packet and ask the midlayer to requeue the command.
	 */
	rval = fc_fcp_pkt_send(lp, fsp);
	if (rval != 0) {
		fsp->state = FC_SRB_FREE;
		fc_fcp_pkt_release(fsp);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
out:
	return rc;
}
EXPORT_SYMBOL(fc_queuecommand);
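
/*
 * Usage sketch (illustrative, not part of this file): a driver built on
 * libfc, such as fcoe, wires fc_queuecommand and the other handlers
 * exported below into its scsi_host_template, roughly:
 *
 *	static struct scsi_host_template my_fc_sht = {
 *		.queuecommand		 = fc_queuecommand,
 *		.eh_abort_handler	 = fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	 = fc_eh_host_reset,
 *		.slave_alloc		 = fc_slave_alloc,
 *		.change_queue_depth	 = fc_change_queue_depth,
 *		.change_queue_type	 = fc_change_queue_type,
 *	};
 *
 * "my_fc_sht" is a placeholder name; a real template also fills in
 * fields such as .module, .name, .this_id, .cmd_per_lun and
 * .sg_tablesize.
 */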

/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp: scsi packet
 *
 * Translates an fsp error into a Linux SCSI error.
 *
 * The fcp packet lock must be held when calling.
 */
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si;
	struct scsi_cmnd *sc_cmd;
	struct fc_lport *lp;
	unsigned long flags;

	/* release outstanding ddp context */
	fc_fcp_ddp_done(fsp);

	fsp->state |= FC_SRB_COMPL;
	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		del_timer_sync(&fsp->timer);
		spin_lock_bh(&fsp->scsi_pkt_lock);
	}

	lp = fsp->lp;
	si = fc_get_scsi_internal(lp);
	spin_lock_irqsave(lp->host->host_lock, flags);
	if (!fsp->cmd) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	/*
	 * if a command timed out while we had to try and throttle IO
	 * and it is now getting cleaned up, then we are about to
	 * try again so clear the throttled flag in case we get more
	 * time outs.
	 */
	if (si->throttled && fsp->state & FC_SRB_NOMEM)
		si->throttled = 0;

	sc_cmd = fsp->cmd;
	fsp->cmd = NULL;

	if (!sc_cmd->SCp.ptr) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
	switch (fsp->status_code) {
	case FC_COMPLETE:
		if (fsp->cdb_status == 0) {
			/*
			 * good I/O status
			 */
			sc_cmd->result = DID_OK << 16;
			if (fsp->scsi_resid)
				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
		} else if (fsp->cdb_status == QUEUE_FULL) {
			struct scsi_device *tmp_sdev;
			struct scsi_device *sdev = sc_cmd->device;

			shost_for_each_device(tmp_sdev, sdev->host) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->queue_depth > 1) {
					scsi_track_queue_full(tmp_sdev,
							      tmp_sdev->queue_depth - 1);
				}
			}
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		} else {
			/*
			 * transport level I/O was ok but scsi
			 * has non zero status
			 */
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		}
		break;
	case FC_ERROR:
		sc_cmd->result = DID_ERROR << 16;
		break;
	case FC_DATA_UNDRUN:
		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
			/*
			 * scsi status is good but transport level
			 * underrun.
			 */
			sc_cmd->result = DID_OK << 16;
		} else {
			/*
			 * scsi got underrun, this is an error
			 */
			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		}
		break;
	case FC_DATA_OVRRUN:
		/*
		 * overrun is an error
		 */
		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		break;
	case FC_CMD_ABORTED:
		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
		break;
	case FC_CMD_TIME_OUT:
		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	case FC_HRD_ERROR:
		sc_cmd->result = (DID_NO_CONNECT << 16);
		break;
	default:
		sc_cmd->result = (DID_ERROR << 16);
		break;
	}

	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	/* release ref from initial allocation in queue command */
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_complete() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a timer is pending on the packet.
 * The host lock must not be held by the caller.
 */
void fc_fcp_complete(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))
		return;
	fc_fcp_complete_locked(fsp);
	fc_fcp_unlock_pkt(fsp);
}
EXPORT_SYMBOL(fc_fcp_complete);

/**
 * fc_eh_abort() - Abort a command
 * @sc_cmd: scsi command to abort
 *
 * From the scsi host template: send an ABTS to the target device
 * and wait for the response.
 */
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_fcp_pkt *fsp;
	struct fc_lport *lp;
	int rc = FAILED;
	unsigned long flags;

	lp = shost_priv(sc_cmd->device->host);
	if (lp->state != LPORT_ST_READY)
		return rc;
	else if (!lp->link_up)
		return rc;

	spin_lock_irqsave(lp->host->host_lock, flags);
	fsp = CMD_SP(sc_cmd);
	if (!fsp) {
		/* command completed while scsi eh was setting up */
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return SUCCESS;
	}
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
	fc_fcp_pkt_hold(fsp);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	if (fc_fcp_lock_pkt(fsp)) {
		/* completed while we were waiting for timer to be deleted */
		rc = SUCCESS;
		goto release_pkt;
	}

	rc = fc_fcp_pkt_abort(fsp);
	fc_fcp_unlock_pkt(fsp);

release_pkt:
	fc_fcp_pkt_release(fsp);
	return rc;
}
EXPORT_SYMBOL(fc_eh_abort);

/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd: scsi command
 *
 * Set in the scsi host template: send a TM command to the target
 * and wait for the response.
 */
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lp;
	struct fc_fcp_pkt *fsp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	int rc = FAILED;
	struct fc_rport_libfc_priv *rp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval)
		goto out;

	rp = rport->dd_data;
	lp = shost_priv(sc_cmd->device->host);

	if (lp->state != LPORT_ST_READY)
		return rc;

	FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);

	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
	if (fsp == NULL) {
		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
		sc_cmd->result = DID_NO_CONNECT << 16;
		goto out;
	}

	/*
	 * Build the libfc request pkt. Do not set the scsi cmnd, because
	 * the sc passed in is not setup for execution like when sent
	 * through the queuecommand callout.
	 */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */

	/*
	 * flush outstanding commands
	 */
	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
	fsp->state = FC_SRB_FREE;
	fc_fcp_pkt_release(fsp);

out:
	return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);

/**
 * fc_eh_host_reset() - Reset the lport and wait for it to become
 *			ready again
 * @sc_cmd: scsi command
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lp = shost_priv(shost);
	unsigned long wait_tmo;

	FC_SCSI_DBG(lp, "Resetting host\n");

	lp->tt.lport_reset(lp);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lp)) {
		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
			     "on port (%6x)\n", fc_host_port_id(lp->host));
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
			     "port (%6x) is not ready.\n",
			     fc_host_port_id(lp->host));
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);

/**
 * fc_slave_alloc() - configure queue depth
 * @sdev: scsi device
 *
 * Configures queue depth based on the host's cmd_per_lun. If not set,
 * the libfc default is used.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	int queue_depth;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported) {
		if (sdev->host->hostt->cmd_per_lun)
			queue_depth = sdev->host->hostt->cmd_per_lun;
		else
			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
		scsi_activate_tcq(sdev, queue_depth);
	}
	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
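
/*
 * Adjust the device's queue depth, preserving its current tag type,
 * and return the depth that is now in effect.
 */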
int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);
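
/*
 * Change the device's tag type, enabling or disabling tagged command
 * queuing as needed, and return the tag type that is now in effect.
 */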
int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);
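
/*
 * Tear down the FCP layer for a local port: warn about any SCSI
 * packets still queued, then free the packet mempool and the
 * per-lport FCP state.
 */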
void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
		       "port (%6x)\n", fc_host_port_id(lp->host));

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);
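
/*
 * Set up the FCP layer for a local port: install defaults for any fcp
 * template handlers the driver did not override, and allocate the
 * per-lport packet queue and mempool.
 */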
int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
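
/*
 * Usage sketch (illustrative, not part of this file): an LLD typically
 * calls fc_fcp_init() while configuring its lport, alongside the other
 * libfc layer initializers, before adding the Scsi_Host; "lport" and
 * the error path here are placeholders:
 *
 *	if (fc_fcp_init(lport))
 *		goto err;
 *	fc_exch_init(lport);
 *	fc_lport_init(lport);
 *	fc_rport_init(lport);
 *	fc_disc_init(lport);
 */

/*
 * Module init: create the SRB cache shared by all lports, then set up
 * the exchange manager and rport support.
 */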
static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
		       "module load failed!");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;

destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}
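
/*
 * Module exit: undo libfc_init() by destroying the SRB cache, the
 * exchange manager, and rport support.
 */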
static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);