/* fc_fcp.c */
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");

unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");

static struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE             0               /* cmd is free */
#define FC_SRB_CMD_SENT         (1 << 0)        /* cmd has been sent */
#define FC_SRB_RCV_STATUS       (1 << 1)        /* response has arrived */
#define FC_SRB_ABORT_PENDING    (1 << 2)        /* cmd abort sent to device */
#define FC_SRB_ABORTED          (1 << 3)        /* abort acknowledged */
#define FC_SRB_DISCONTIG        (1 << 4)        /* non-sequential data recvd */
#define FC_SRB_COMPL            (1 << 5)        /* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)      /* timer function processing */
#define FC_SRB_NOMEM            (1 << 7)        /* dropped due to out of mem */

#define FC_SRB_READ             (1 << 1)
#define FC_SRB_WRITE            (1 << 0)

/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been returned to the scsi layer.
 */
#define CMD_SP(Cmnd)            ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)  ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)  ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)   ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)     ((Cmnd)->SCp.buffers_residual)
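/*
 * Illustrative sketch (not part of the original file): the CMD_* macros
 * above alias fields of the scsi_cmnd scratchpad (SCp). A caller that
 * owns the host lock can test-and-clear the back-pointer like this;
 * "sc_cmd" is a hypothetical struct scsi_cmnd pointer:
 *
 *      struct fc_fcp_pkt *fsp = CMD_SP(sc_cmd);
 *      if (fsp) {
 *              sc_cmd->SCp.ptr = NULL;  // command now owned by scsi-ml
 *              CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
 *      }
 */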
struct fc_fcp_internal {
        mempool_t *scsi_pkt_pool;
        struct list_head scsi_pkt_queue;
        u8 throttled;
};

#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
static void fc_timeout_error(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long data);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE             0
#define FC_CMD_ABORTED          1
#define FC_CMD_RESET            2
#define FC_CMD_PLOGO            3
#define FC_SNS_RCV              4
#define FC_TRANS_ERR            5
#define FC_DATA_OVRRUN          6
#define FC_DATA_UNDRUN          7
#define FC_ERROR                8
#define FC_HRD_ERROR            9
#define FC_CMD_TIME_OUT         10

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT      (10 * HZ)
#define FC_SCSI_TM_TOV          (10 * HZ)
#define FC_SCSI_REC_TOV         (2 * HZ)
#define FC_HOST_RESET_TIMEOUT   (30 * HZ)

#define FC_MAX_ERROR_CNT        5
#define FC_MAX_RECOV_RETRY      3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc() - allocation routine for scsi_pkt packet
 * @lp: fc lport struct
 * @gfp: gfp flags for allocation
 *
 * This is used by upper layer scsi driver.
 * Return Value : scsi_pkt structure or NULL on allocation failure.
 * Context : call from process context. no locking required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
{
        struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
        struct fc_fcp_pkt *fsp;

        fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
        if (fsp) {
                memset(fsp, 0, sizeof(*fsp));
                fsp->lp = lp;
                atomic_set(&fsp->ref_cnt, 1);
                init_timer(&fsp->timer);
                INIT_LIST_HEAD(&fsp->list);
                spin_lock_init(&fsp->scsi_pkt_lock);
        }
        return fsp;
}

/**
 * fc_fcp_pkt_release() - release hold on scsi_pkt packet
 * @fsp: fcp packet struct
 *
 * This is used by upper layer scsi driver.
 * Context : call from process and interrupt context.
 *           no locking required
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
        if (atomic_dec_and_test(&fsp->ref_cnt)) {
                struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

                mempool_free(fsp, si->scsi_pkt_pool);
        }
}

static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
        atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
 * @seq: exchange sequence
 * @fsp: fcp packet struct
 *
 * Release the hold on the scsi_pkt packet that was set to keep the
 * scsi_pkt around until the EM layer's exchange resource is freed.
 * Context : called from EM layer.
 *           no locking required
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
        fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
 * @fsp: fcp packet
 *
 * We should only return error if we return a command to scsi-ml before
 * getting a response. This can happen in cases where we send an abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packet's refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
        spin_lock_bh(&fsp->scsi_pkt_lock);
        if (fsp->state & FC_SRB_COMPL) {
                spin_unlock_bh(&fsp->scsi_pkt_lock);
                return -EPERM;
        }

        fc_fcp_pkt_hold(fsp);
        return 0;
}

static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
        spin_unlock_bh(&fsp->scsi_pkt_lock);
        fc_fcp_pkt_release(fsp);
}
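/*
 * Usage sketch (illustrative, not part of the original file): every
 * response and timeout path in this file brackets its work with the
 * pair above, so completion can run safely under the packet lock:
 *
 *      if (fc_fcp_lock_pkt(fsp))
 *              return;                 // already completed, no ref taken
 *      ... inspect/modify fsp, possibly fc_fcp_complete_locked(fsp) ...
 *      fc_fcp_unlock_pkt(fsp);         // drops both the lock and the ref
 */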
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
        if (!(fsp->state & FC_SRB_COMPL))
                mod_timer(&fsp->timer, jiffies + delay);
}

static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
        if (!fsp->seq_ptr)
                return -EINVAL;

        fsp->state |= FC_SRB_ABORT_PENDING;
        return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/*
 * Retry command.
 * An abort isn't needed.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
        if (fsp->seq_ptr) {
                fsp->lp->tt.exch_done(fsp->seq_ptr);
                fsp->seq_ptr = NULL;
        }

        fsp->state &= ~FC_SRB_ABORT_PENDING;
        fsp->io_status = 0;
        fsp->status_code = FC_ERROR;
        fc_fcp_complete_locked(fsp);
}

/*
 * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
 * transfer for a read I/O indicated by the fc_fcp_pkt.
 * @fsp: ptr to the fc_fcp_pkt
 *
 * This is called in exch_seq_send() when we have a newly allocated
 * exchange with a valid exchange id to setup ddp.
 *
 * returns: none
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
        struct fc_lport *lp;

        if (!fsp)
                return;

        lp = fsp->lp;
        if ((fsp->req_flags & FC_SRB_READ) &&
            (lp->lro_enabled) && (lp->tt.ddp_setup)) {
                if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
                                     scsi_sg_count(fsp->cmd)))
                        fsp->xfer_ddp = xid;
        }
}
EXPORT_SYMBOL(fc_fcp_ddp_setup);

/*
 * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
 * DDP related resources for this I/O if it is initialized
 * as a ddp transfer
 * @fsp: ptr to the fc_fcp_pkt
 *
 * returns: none
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
        struct fc_lport *lp;

        if (!fsp)
                return;

        lp = fsp->lp;
        if (fsp->xfer_ddp && lp->tt.ddp_done) {
                fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
                fsp->xfer_ddp = 0;
        }
}
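/*
 * Pairing sketch (illustrative, not part of the original file):
 * fc_fcp_ddp_setup() records the exchange id in fsp->xfer_ddp, and
 * fc_fcp_ddp_done() later uses that id to collect the offloaded
 * transfer length from the LLD and clear the mark:
 *
 *      fc_fcp_ddp_setup(fsp, xid);     // at exchange allocation time
 *      ...     // read data is placed directly by the LLD
 *      fc_fcp_ddp_done(fsp);           // fsp->xfer_len now reflects DDP
 */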
/*
 * Receive SCSI data from target.
 * Called after receiving solicited data.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
        struct scsi_cmnd *sc = fsp->cmd;
        struct fc_lport *lp = fsp->lp;
        struct fcoe_dev_stats *stats;
        struct fc_frame_header *fh;
        size_t start_offset;
        size_t offset;
        u32 crc;
        u32 copy_len = 0;
        size_t len;
        void *buf;
        struct scatterlist *sg;
        size_t remaining;

        fh = fc_frame_header_get(fp);
        offset = ntohl(fh->fh_parm_offset);
        start_offset = offset;
        len = fr_len(fp) - sizeof(*fh);
        buf = fc_frame_payload_get(fp, 0);

        /* if this I/O is ddped, update xfer len */
        fc_fcp_ddp_done(fsp);

        if (offset + len > fsp->data_len) {
                /* this should never happen */
                if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
                    fc_frame_crc_check(fp))
                        goto crc_err;
                FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
                           "data_len %x\n", len, offset, fsp->data_len);
                fc_fcp_retry_cmd(fsp);
                return;
        }
        if (offset != fsp->xfer_len)
                fsp->state |= FC_SRB_DISCONTIG;

        crc = 0;
        if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
                crc = crc32(~0, (u8 *) fh, sizeof(*fh));

        sg = scsi_sglist(sc);
        remaining = len;

        while (remaining > 0 && sg) {
                size_t off;
                void *page_addr;
                size_t sg_bytes;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        sg = sg_next(sg);
                        continue;
                }
                sg_bytes = min(remaining, sg->length - offset);

                /*
                 * The scatterlist item may be bigger than PAGE_SIZE,
                 * but we are limited to mapping PAGE_SIZE at a time.
                 */
                off = offset + sg->offset;
                sg_bytes = min(sg_bytes, (size_t)
                               (PAGE_SIZE - (off & ~PAGE_MASK)));
                page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
                                        KM_SOFTIRQ0);
                if (!page_addr)
                        break;          /* XXX panic? */

                if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
                        crc = crc32(crc, buf, sg_bytes);
                memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
                       sg_bytes);

                kunmap_atomic(page_addr, KM_SOFTIRQ0);
                buf += sg_bytes;
                offset += sg_bytes;
                remaining -= sg_bytes;
                copy_len += sg_bytes;
        }

        if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
                buf = fc_frame_payload_get(fp, 0);
                if (len % 4) {
                        crc = crc32(crc, buf + len, 4 - (len % 4));
                        len += 4 - (len % 4);
                }
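                /*
                 * Worked example (illustrative, not in the original file):
                 * FC frame payloads are 4-byte aligned on the wire, so a
                 * 6-byte data burst is followed by 2 pad bytes that were
                 * also covered by the received CRC. Here len 6 means crc32
                 * over buf[6..7], then len becomes 8 before the final CRC
                 * comparison below.
                 */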
                if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
                        stats = fc_lport_get_stats(lp);
                        stats->ErrorFrames++;
                        /* FIXME - per cpu count, not total count! */
                        if (stats->InvalidCRCCount++ < 5)
                                printk(KERN_WARNING "libfc: CRC error on data "
                                       "frame for port (%6x)\n",
                                       fc_host_port_id(lp->host));
                        /*
                         * Assume the frame is total garbage.
                         * We may have copied it over the good part
                         * of the buffer.
                         * If so, we need to retry the entire operation.
                         * Otherwise, ignore it.
                         */
                        if (fsp->state & FC_SRB_DISCONTIG)
                                fc_fcp_retry_cmd(fsp);
                        return;
                }
        }

        if (fsp->xfer_contig_end == start_offset)
                fsp->xfer_contig_end += copy_len;
        fsp->xfer_len += copy_len;

        /*
         * In the very rare event that this data arrived after the response
         * and completes the transfer, call the completion handler.
         */
        if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
            fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
                fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_send_data() - Send SCSI data to target.
 * @fsp: ptr to fc_fcp_pkt
 * @sp: ptr to this sequence
 * @offset: starting offset for this data request
 * @seq_blen: the burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down seq_blen
 * bytes of data in a single frame, otherwise send multiple FC
 * frames of the max FC frame payload supported by the target port.
 *
 * Returns : 0 for success.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
                            size_t offset, size_t seq_blen)
{
        struct fc_exch *ep;
        struct scsi_cmnd *sc;
        struct scatterlist *sg;
        struct fc_frame *fp = NULL;
        struct fc_lport *lp = fsp->lp;
        size_t remaining;
        size_t t_blen;
        size_t tlen;
        size_t sg_bytes;
        size_t frame_offset, fh_parm_offset;
        int error;
        void *data = NULL;
        void *page_addr;
        int using_sg = lp->sg_supp;
        u32 f_ctl;

        WARN_ON(seq_blen <= 0);
        if (unlikely(offset + seq_blen > fsp->data_len)) {
                /* this should never happen */
                FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
                           "offset %zx\n", seq_blen, offset);
                fc_fcp_send_abort(fsp);
                return 0;
        } else if (offset != fsp->xfer_len) {
                /* Out of Order Data Request - no problem, but unexpected. */
                FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
                           "seq_blen %zx offset %zx\n", seq_blen, offset);
        }

        /*
         * if LLD is capable of seq_offload then set transport
         * burst length (t_blen) to seq_blen, otherwise set t_blen
         * to max FC frame payload previously set in fsp->max_payload.
         */
        t_blen = fsp->max_payload;
        if (lp->seq_offload) {
                t_blen = min(seq_blen, (size_t)lp->lso_max);
                FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
                           fsp, seq_blen, lp->lso_max, t_blen);
        }

        WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
        if (t_blen > 512)
                t_blen &= ~(512 - 1);   /* round down to block size */
        WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);   /* won't go below 256 */
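        /*
         * Worked example (illustrative, not in the original file):
         * t_blen is rounded down to a multiple of 512 so bursts stay
         * block-aligned, e.g. a max_payload of 1500 becomes
         * 1500 & ~511 = 1024; values of 512 or less are left alone,
         * and per the comment above the result never drops below 256.
         */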
        sc = fsp->cmd;

        remaining = seq_blen;
        fh_parm_offset = frame_offset = offset;
        tlen = 0;
        seq = lp->tt.seq_start_next(seq);
        f_ctl = FC_FC_REL_OFF;
        WARN_ON(!seq);

        sg = scsi_sglist(sc);

        while (remaining > 0 && sg) {
                if (offset >= sg->length) {
                        offset -= sg->length;
                        sg = sg_next(sg);
                        continue;
                }
                if (!fp) {
                        tlen = min(t_blen, remaining);

                        /*
                         * TODO. Temporary workaround. fc_seq_send() can't
                         * handle odd lengths in non-linear skbs.
                         * This will be the final fragment only.
                         */
                        if (tlen % 4)
                                using_sg = 0;
                        if (using_sg) {
                                fp = _fc_frame_alloc(lp, 0);
                                if (!fp)
                                        return -ENOMEM;
                        } else {
                                fp = fc_frame_alloc(lp, tlen);
                                if (!fp)
                                        return -ENOMEM;
                                data = (void *)(fr_hdr(fp)) +
                                        sizeof(struct fc_frame_header);
                        }
                        fh_parm_offset = frame_offset;
                        fr_max_payload(fp) = fsp->max_payload;
                }
                sg_bytes = min(tlen, sg->length - offset);
                if (using_sg) {
                        get_page(sg_page(sg));
                        skb_fill_page_desc(fp_skb(fp),
                                           skb_shinfo(fp_skb(fp))->nr_frags,
                                           sg_page(sg), sg->offset + offset,
                                           sg_bytes);
                        fp_skb(fp)->data_len += sg_bytes;
                        fr_len(fp) += sg_bytes;
                        fp_skb(fp)->truesize += PAGE_SIZE;
                } else {
                        size_t off = offset + sg->offset;

                        /*
                         * The scatterlist item may be bigger than PAGE_SIZE,
                         * but we must not cross pages inside the kmap.
                         */
                        sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
                                                           (off & ~PAGE_MASK)));
                        page_addr = kmap_atomic(sg_page(sg) +
                                                (off >> PAGE_SHIFT),
                                                KM_SOFTIRQ0);
                        memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
                               sg_bytes);
                        kunmap_atomic(page_addr, KM_SOFTIRQ0);
                        data += sg_bytes;
                }
                offset += sg_bytes;
                frame_offset += sg_bytes;
                tlen -= sg_bytes;
                remaining -= sg_bytes;

                if (tlen)
                        continue;

                /*
                 * Send sequence with transfer sequence initiative in case
                 * this is the last FCP frame of the sequence.
                 */
                if (remaining == 0)
                        f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

                ep = fc_seq_exch(seq);
                fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
                               FC_TYPE_FCP, f_ctl, fh_parm_offset);

                /*
                 * send this fragment of the sequence.
                 */
                error = lp->tt.seq_send(lp, seq, fp);
                if (error) {
                        WARN_ON(1);             /* send error should be rare */
                        fc_fcp_retry_cmd(fsp);
                        return 0;
                }
                fp = NULL;
        }
        fsp->xfer_len += seq_blen;      /* premature count? */
        return 0;
}

static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
        int ba_done = 1;
        struct fc_ba_rjt *brp;
        struct fc_frame_header *fh;

        fh = fc_frame_header_get(fp);
        switch (fh->fh_r_ctl) {
        case FC_RCTL_BA_ACC:
                break;
        case FC_RCTL_BA_RJT:
                brp = fc_frame_payload_get(fp, sizeof(*brp));
                if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
                        break;
                /* fall thru */
        default:
                /*
                 * we will let the command timeout
                 * and scsi-ml recover in this case,
                 * therefore clear the ba_done flag.
                 */
                ba_done = 0;
        }

        if (ba_done) {
                fsp->state |= FC_SRB_ABORTED;
                fsp->state &= ~FC_SRB_ABORT_PENDING;

                if (fsp->wait_for_comp)
                        complete(&fsp->tm_done);
                else
                        fc_fcp_complete_locked(fsp);
        }
}

/**
 * fc_fcp_reduce_can_queue() - drop can_queue
 * @lp: lport to drop queueing for
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * on all reserved structs.
 */
static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
{
        struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
        unsigned long flags;
        int can_queue;

        spin_lock_irqsave(lp->host->host_lock, flags);
        if (si->throttled)
                goto done;
        si->throttled = 1;

        can_queue = lp->host->can_queue;
        can_queue >>= 1;
        if (!can_queue)
                can_queue = 1;
        lp->host->can_queue = can_queue;
        shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
                     "Reducing can_queue to %d.\n", can_queue);
done:
        spin_unlock_irqrestore(lp->host->host_lock, flags);
}
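/*
 * Worked example (illustrative, not in the original file): can_queue
 * is halved on each allocation failure, so a host that starts at the
 * default depth degrades geometrically, e.g. 32 -> 16 -> 8 -> 4 -> 2
 * -> 1, and is pinned at 1 rather than 0 so I/O can still trickle
 * through while memory pressure lasts.
 */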
/**
 * fc_fcp_recv() - Receive FCP frames
 * @seq: The sequence the frame is on
 * @fp: The FC frame
 * @arg: The related FCP packet
 *
 * Return : None
 * Context : called from Soft IRQ context
 *           must not be called while holding the list lock
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
        struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
        struct fc_lport *lport = fsp->lp;
        struct fc_frame_header *fh;
        struct fcp_txrdy *dd;
        u8 r_ctl;
        int rc = 0;

        if (IS_ERR(fp))
                goto errout;

        fh = fc_frame_header_get(fp);
        r_ctl = fh->fh_r_ctl;

        if (!(lport->state & LPORT_ST_READY))
                goto out;
        if (fc_fcp_lock_pkt(fsp))
                goto out;
        fsp->last_pkt_time = jiffies;

        if (fh->fh_type == FC_TYPE_BLS) {
                fc_fcp_abts_resp(fsp, fp);
                goto unlock;
        }

        if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
                goto unlock;

        if (r_ctl == FC_RCTL_DD_DATA_DESC) {
                /*
                 * received XFER RDY from the target
                 * need to send data to the target
                 */
                WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
                dd = fc_frame_payload_get(fp, sizeof(*dd));
                WARN_ON(!dd);

                rc = fc_fcp_send_data(fsp, seq,
                                      (size_t) ntohl(dd->ft_data_ro),
                                      (size_t) ntohl(dd->ft_burst_len));
                if (!rc)
                        seq->rec_data = fsp->xfer_len;
                else if (rc == -ENOMEM)
                        fsp->state |= FC_SRB_NOMEM;
        } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
                /*
                 * received a DATA frame
                 * next we will copy the data to the system buffer
                 */
                WARN_ON(fr_len(fp) < sizeof(*fh));      /* len may be 0 */
                fc_fcp_recv_data(fsp, fp);
                seq->rec_data = fsp->xfer_contig_end;
        } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
                WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

                fc_fcp_resp(fsp, fp);
        } else {
                FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
        }
unlock:
        fc_fcp_unlock_pkt(fsp);
out:
        fc_frame_free(fp);
errout:
        if (IS_ERR(fp))
                fc_fcp_error(fsp, fp);
        else if (rc == -ENOMEM)
                fc_fcp_reduce_can_queue(lport);
}

static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
        struct fc_frame_header *fh;
        struct fcp_resp *fc_rp;
        struct fcp_resp_ext *rp_ex;
        struct fcp_resp_rsp_info *fc_rp_info;
        u32 plen;
        u32 expected_len;
        u32 respl = 0;
        u32 snsl = 0;
        u8 flags = 0;

        plen = fr_len(fp);
        fh = (struct fc_frame_header *)fr_hdr(fp);
        if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
                goto len_err;
        plen -= sizeof(*fh);
        fc_rp = (struct fcp_resp *)(fh + 1);
        fsp->cdb_status = fc_rp->fr_status;
        flags = fc_rp->fr_flags;
        fsp->scsi_comp_flags = flags;
        expected_len = fsp->data_len;

        /* if ddp, update xfer len */
        fc_fcp_ddp_done(fsp);

        if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
                rp_ex = (void *)(fc_rp + 1);
                if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
                        if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
                                goto len_err;
                        fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
                        if (flags & FCP_RSP_LEN_VAL) {
                                respl = ntohl(rp_ex->fr_rsp_len);
                                if (respl != sizeof(*fc_rp_info))
                                        goto len_err;
                                if (fsp->wait_for_comp) {
                                        /* Abuse cdb_status for rsp code */
                                        fsp->cdb_status = fc_rp_info->rsp_code;
                                        complete(&fsp->tm_done);
                                        /*
                                         * tmfs will not have any scsi cmd so
                                         * exit here
                                         */
                                        return;
                                } else
                                        goto err;
                        }
                        if (flags & FCP_SNS_LEN_VAL) {
                                snsl = ntohl(rp_ex->fr_sns_len);
                                if (snsl > SCSI_SENSE_BUFFERSIZE)
                                        snsl = SCSI_SENSE_BUFFERSIZE;
                                memcpy(fsp->cmd->sense_buffer,
                                       (char *)fc_rp_info + respl, snsl);
                        }
                }
                if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
                        if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
                                goto len_err;
                        if (flags & FCP_RESID_UNDER) {
                                fsp->scsi_resid = ntohl(rp_ex->fr_resid);
                                /*
                                 * The cmnd->underflow is the minimum number of
                                 * bytes that must be transferred for this
                                 * command. Provided a sense condition is not
                                 * present, make sure the actual amount
                                 * transferred is at least the underflow value
                                 * or fail.
                                 */
                                if (!(flags & FCP_SNS_LEN_VAL) &&
                                    (fc_rp->fr_status == 0) &&
                                    (scsi_bufflen(fsp->cmd) -
                                     fsp->scsi_resid) < fsp->cmd->underflow)
                                        goto err;
                                expected_len -= fsp->scsi_resid;
                        } else {
                                fsp->status_code = FC_ERROR;
                        }
                }
        }
        fsp->state |= FC_SRB_RCV_STATUS;

        /*
         * Check for missing or extra data frames.
         */
        if (unlikely(fsp->xfer_len != expected_len)) {
                if (fsp->xfer_len < expected_len) {
                        /*
                         * Some data may be queued locally,
                         * Wait at least one jiffy to see if it is delivered.
                         * If this expires without data, we may do SRR.
                         */
                        fc_fcp_timer_set(fsp, 2);
                        return;
                }
                fsp->status_code = FC_DATA_OVRRUN;
                FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
                           "len %x, data len %x\n",
                           fsp->rport->port_id,
                           fsp->xfer_len, expected_len, fsp->data_len);
        }
        fc_fcp_complete_locked(fsp);
        return;

len_err:
        FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
                   "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
        fsp->status_code = FC_ERROR;
        fc_fcp_complete_locked(fsp);
}
/**
 * fc_fcp_complete_locked() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
        struct fc_lport *lp = fsp->lp;
        struct fc_seq *seq;
        struct fc_exch *ep;
        u32 f_ctl;

        if (fsp->state & FC_SRB_ABORT_PENDING)
                return;

        if (fsp->state & FC_SRB_ABORTED) {
                if (!fsp->status_code)
                        fsp->status_code = FC_CMD_ABORTED;
        } else {
                /*
                 * Test for transport underrun, independent of response
                 * underrun status.
                 */
                if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
                    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
                     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
                        fsp->status_code = FC_DATA_UNDRUN;
                        fsp->io_status = 0;
                }
        }

        seq = fsp->seq_ptr;
        if (seq) {
                fsp->seq_ptr = NULL;
                if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
                        struct fc_frame *conf_frame;
                        struct fc_seq *csp;

                        csp = lp->tt.seq_start_next(seq);
                        conf_frame = fc_frame_alloc(fsp->lp, 0);
                        if (conf_frame) {
                                f_ctl = FC_FC_SEQ_INIT;
                                f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
                                ep = fc_seq_exch(seq);
                                fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
                                               ep->did, ep->sid,
                                               FC_TYPE_FCP, f_ctl, 0);
                                lp->tt.seq_send(lp, csp, conf_frame);
                        }
                }
                lp->tt.exch_done(seq);
        }
        fc_io_compl(fsp);
}

static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
        struct fc_lport *lp = fsp->lp;

        if (fsp->seq_ptr) {
                lp->tt.exch_done(fsp->seq_ptr);
                fsp->seq_ptr = NULL;
        }
        fsp->status_code = error;
}

/**
 * fc_fcp_cleanup_each_cmd() - Cleanup active commands
 * @lp: logical port
 * @id: target id
 * @lun: lun
 * @error: fsp status code
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
                                    unsigned int lun, int error)
{
        struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
        struct fc_fcp_pkt *fsp;
        struct scsi_cmnd *sc_cmd;
        unsigned long flags;

        spin_lock_irqsave(lp->host->host_lock, flags);
restart:
        list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
                sc_cmd = fsp->cmd;
                if (id != -1 && scmd_id(sc_cmd) != id)
                        continue;

                if (lun != -1 && sc_cmd->device->lun != lun)
                        continue;

                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(lp->host->host_lock, flags);

                if (!fc_fcp_lock_pkt(fsp)) {
                        fc_fcp_cleanup_cmd(fsp, error);
                        fc_io_compl(fsp);
                        fc_fcp_unlock_pkt(fsp);
                }

                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(lp->host->host_lock, flags);

                /*
                 * while we dropped the lock multiple pkts could
                 * have been released, so we have to start over.
                 */
                goto restart;
        }
        spin_unlock_irqrestore(lp->host->host_lock, flags);
}

static void fc_fcp_abort_io(struct fc_lport *lp)
{
        fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
}

/**
 * fc_fcp_pkt_send() - send a fcp packet to the lower level.
 * @lp: fc lport
 * @fsp: fc packet.
 *
 * This is called by upper layer protocol.
 * Return : zero for success and -1 for failure
 * Context : called from queuecommand which can be called from process
 *           or scsi soft irq.
 * Locks : called with the host lock and irqs disabled.
 */
static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
        struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
        int rc;

        fsp->cmd->SCp.ptr = (char *)fsp;
        fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
        fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

        int_to_scsilun(fsp->cmd->device->lun,
                       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
        memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
        list_add_tail(&fsp->list, &si->scsi_pkt_queue);

        spin_unlock_irq(lp->host->host_lock);
        rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
        spin_lock_irq(lp->host->host_lock);
        if (rc)
                list_del(&fsp->list);

        return rc;
}
static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
                           void (*resp)(struct fc_seq *,
                                        struct fc_frame *fp,
                                        void *arg))
{
        struct fc_frame *fp;
        struct fc_seq *seq;
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rp;
        const size_t len = sizeof(fsp->cdb_cmd);
        int rc = 0;

        if (fc_fcp_lock_pkt(fsp))
                return 0;

        fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
        if (!fp) {
                rc = -1;
                goto unlock;
        }

        memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
        fr_fsp(fp) = fsp;
        rport = fsp->rport;
        fsp->max_payload = rport->maxframe_size;
        rp = rport->dd_data;

        fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
                       fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
                       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
        if (!seq) {
                fc_frame_free(fp);
                rc = -1;
                goto unlock;
        }
        fsp->last_pkt_time = jiffies;
        fsp->seq_ptr = seq;
        fc_fcp_pkt_hold(fsp);   /* hold for fc_fcp_pkt_destroy */

        setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
        fc_fcp_timer_set(fsp,
                         (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
                         FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
        fc_fcp_unlock_pkt(fsp);
        return rc;
}

/*
 * transport error handler
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
        int error = PTR_ERR(fp);

        if (fc_fcp_lock_pkt(fsp))
                return;

        if (error == -FC_EX_CLOSED) {
                fc_fcp_retry_cmd(fsp);
                goto unlock;
        }

        /*
         * clear abort pending, because the lower layer
         * decided to force completion.
         */
        fsp->state &= ~FC_SRB_ABORT_PENDING;
        fsp->status_code = FC_CMD_PLOGO;
        fc_fcp_complete_locked(fsp);
unlock:
        fc_fcp_unlock_pkt(fsp);
}

/*
 * Scsi abort handler - sends an abort
 * and then waits for the abort completion.
 */
static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
{
        int rc = FAILED;

        if (fc_fcp_send_abort(fsp))
                return FAILED;

        init_completion(&fsp->tm_done);
        fsp->wait_for_comp = 1;

        spin_unlock_bh(&fsp->scsi_pkt_lock);
        rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
        spin_lock_bh(&fsp->scsi_pkt_lock);
        fsp->wait_for_comp = 0;

        if (!rc) {
                FC_FCP_DBG(fsp, "target abort cmd failed\n");
                rc = FAILED;
        } else if (fsp->state & FC_SRB_ABORTED) {
                FC_FCP_DBG(fsp, "target abort cmd passed\n");
                rc = SUCCESS;
                fc_fcp_complete_locked(fsp);
        }

        return rc;
}
/*
 * Retry LUN reset after resource allocation failed.
 */
static void fc_lun_reset_send(unsigned long data)
{
        struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
        struct fc_lport *lp = fsp->lp;

        if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
                if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
                        return;
                if (fc_fcp_lock_pkt(fsp))
                        return;
                setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
                fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
                fc_fcp_unlock_pkt(fsp);
        }
}

/*
 * Scsi device reset handler - send a LUN RESET to the device
 * and wait for the reset reply.
 */
static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
                        unsigned int id, unsigned int lun)
{
        int rc;

        fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
        fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
        int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

        fsp->wait_for_comp = 1;
        init_completion(&fsp->tm_done);

        fc_lun_reset_send((unsigned long)fsp);

        /*
         * wait for completion of reset
         * after that make sure all commands are terminated
         */
        rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

        spin_lock_bh(&fsp->scsi_pkt_lock);
        fsp->state |= FC_SRB_COMPL;
        spin_unlock_bh(&fsp->scsi_pkt_lock);

        del_timer_sync(&fsp->timer);

        spin_lock_bh(&fsp->scsi_pkt_lock);
        if (fsp->seq_ptr) {
                lp->tt.exch_done(fsp->seq_ptr);
                fsp->seq_ptr = NULL;
        }
        fsp->wait_for_comp = 0;
        spin_unlock_bh(&fsp->scsi_pkt_lock);

        if (!rc) {
                FC_SCSI_DBG(lp, "lun reset failed\n");
                return FAILED;
        }

        /* cdb_status holds the tmf's rsp code */
        if (fsp->cdb_status != FCP_TMF_CMPL)
                return FAILED;

        FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
        fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
        return SUCCESS;
}
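/*
 * Usage sketch (illustrative, not part of the original file): a SCSI
 * error-handler device-reset callback could allocate a packet, point
 * it at the target, and drive fc_lun_reset(); the caller shape below
 * is hypothetical.
 *
 *      fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
 *      if (!fsp)
 *              return FAILED;
 *      fsp->rport = rport;     // reset goes to this remote port
 *      rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd),
 *                        sc_cmd->device->lun);
 *      fc_fcp_pkt_release(fsp);
 *      return rc;              // SUCCESS only if FCP_TMF_CMPL came back
 */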
/*
 * Task Management response handler
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
        struct fc_fcp_pkt *fsp = arg;
        struct fc_frame_header *fh;

        if (IS_ERR(fp)) {
                /*
                 * If there is an error just let it timeout or wait
                 * for the TMF to be aborted if it timed out.
                 *
                 * scsi-eh will escalate when either happens.
                 */
                return;
        }

        if (fc_fcp_lock_pkt(fsp))
                return;

        /*
         * raced with eh timeout handler.
         */
        if (!fsp->seq_ptr || !fsp->wait_for_comp) {
                spin_unlock_bh(&fsp->scsi_pkt_lock);
                return;
        }

        fh = fc_frame_header_get(fp);
        if (fh->fh_type != FC_TYPE_BLS)
                fc_fcp_resp(fsp, fp);
        fsp->seq_ptr = NULL;
        fsp->lp->tt.exch_done(seq);
        fc_frame_free(fp);
        fc_fcp_unlock_pkt(fsp);
}

static void fc_fcp_cleanup(struct fc_lport *lp)
{
        fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
}

/*
 * fc_fcp_timeout: called by OS timer function.
 *
 * The timer has been inactivated and must be reactivated if desired
 * using fc_fcp_timer_set().
 *
 * Algorithm:
 *
 * If REC is supported, just issue it, and return. The REC exchange will
 * complete or time out, and recovery can continue at that point.
 *
 * Otherwise, if the response has been received without all the data,
 * it has been ER_TIMEOUT since the response was received.
 *
 * If the response has not been received,
 * we see if data was received recently. If it has been, we continue waiting,
 * otherwise, we abort the command.
 */
static void fc_fcp_timeout(unsigned long data)
{
        struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
        struct fc_rport *rport = fsp->rport;
        struct fc_rport_libfc_priv *rp = rport->dd_data;

        if (fc_fcp_lock_pkt(fsp))
                return;
        if (fsp->cdb_cmd.fc_tm_flags)
                goto unlock;

        fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

        if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_rec(fsp);
        else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
                               jiffies))
                fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
        else if (fsp->state & FC_SRB_RCV_STATUS)
                fc_fcp_complete_locked(fsp);
        else
                fc_timeout_error(fsp);
        fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
        fc_fcp_unlock_pkt(fsp);
}

/*
 * Send a REC ELS request
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
        struct fc_lport *lp;
        struct fc_frame *fp;
        struct fc_rport *rport;
        struct fc_rport_libfc_priv *rp;

        lp = fsp->lp;
        rport = fsp->rport;
        rp = rport->dd_data;
        if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
                fsp->status_code = FC_HRD_ERROR;
                fsp->io_status = 0;
                fc_fcp_complete_locked(fsp);
                return;
        }
        fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
        if (!fp)
                goto retry;

        fr_seq(fp) = fsp->seq_ptr;
        fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
                       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
                       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
        if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
                              fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
                fc_fcp_pkt_hold(fsp);   /* hold while REC outstanding */
                return;
        }
        fc_frame_free(fp);
retry:
        if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
                fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
        else
                fc_timeout_error(fsp);
}

/*
 * Receive handler for REC ELS frame.
 * If it is a reject then let the scsi layer handle the timeout.
 * If it is an LS_ACC then, if the I/O was not completed,
 * set the timeout and return; otherwise complete the exchange
 * and tell the scsi layer to restart the I/O.
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
        struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
        struct fc_els_rec_acc *recp;
        struct fc_els_ls_rjt *rjt;
        u32 e_stat;
        u8 opcode;
        u32 offset;
        enum dma_data_direction data_dir;
        enum fc_rctl r_ctl;
        struct fc_rport_libfc_priv *rp;

        if (IS_ERR(fp)) {
                fc_fcp_rec_error(fsp, fp);
                return;
        }

        if (fc_fcp_lock_pkt(fsp))
                goto out;

        fsp->recov_retry = 0;
        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                switch (rjt->er_reason) {
                default:
                        FC_FCP_DBG(fsp, "device %x unexpected REC reject "
                                   "reason %d expl %d\n",
                                   fsp->rport->port_id, rjt->er_reason,
                                   rjt->er_explan);
                        /* fall through */
                case ELS_RJT_UNSUP:
                        FC_FCP_DBG(fsp, "device does not support REC\n");
                        rp = fsp->rport->dd_data;
                        /*
                         * if we do not support RECs or got some bogus
                         * reason then reset the timer so we check for
                         * making progress.
                         */
                        rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
                        fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
                        break;
                case ELS_RJT_LOGIC:
                case ELS_RJT_UNAB:
                        /*
                         * If no data transfer, the command frame got dropped
                         * so we just retry. If data was transferred, we
                         * lost the response but the target has no record,
                         * so we abort and retry.
                         */
                        if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
                            fsp->xfer_len == 0) {
                                fc_fcp_retry_cmd(fsp);
                                break;
                        }
                        fc_timeout_error(fsp);
                        break;
                }
        } else if (opcode == ELS_LS_ACC) {
                if (fsp->state & FC_SRB_ABORTED)
                        goto unlock_out;

                data_dir = fsp->cmd->sc_data_direction;
                recp = fc_frame_payload_get(fp, sizeof(*recp));
                offset = ntohl(recp->reca_fc4value);
                e_stat = ntohl(recp->reca_e_stat);

                if (e_stat & ESB_ST_COMPLETE) {
                        /*
                         * The exchange is complete.
                         *
                         * For output, we must've lost the response.
                         * For input, all data must've been sent.
                         * We may have lost the response
                         * (and a confirmation was requested) and maybe
                         * some data.
                         *
                         * If all data received, send SRR
                         * asking for response. If partial data received,
                         * or gaps, SRR requests data at start of gap.
                         * Recovery via SRR relies on in-order-delivery.
                         */
                        if (data_dir == DMA_TO_DEVICE) {
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                        } else if (fsp->xfer_contig_end == offset) {
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                        } else {
                                offset = fsp->xfer_contig_end;
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                        }
                        fc_fcp_srr(fsp, r_ctl, offset);
                } else if (e_stat & ESB_ST_SEQ_INIT) {
                        /*
                         * The remote port has the initiative, so just
                         * keep waiting for it to complete.
                         */
                        fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
                } else {
                        /*
                         * The exchange is incomplete, we have seq. initiative.
                         * Lost response with requested confirmation,
                         * lost confirmation, lost transfer ready or
                         * lost write data.
                         *
                         * For output, if not all data was received, ask
                         * for transfer ready to be repeated.
                         *
                         * If we received or sent all the data, send SRR to
                         * request response.
                         *
                         * If we lost a response, we may have lost some read
                         * data as well.
                         */
                        r_ctl = FC_RCTL_DD_SOL_DATA;
                        if (data_dir == DMA_TO_DEVICE) {
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                if (offset < fsp->data_len)
                                        r_ctl = FC_RCTL_DD_DATA_DESC;
                        } else if (offset == fsp->xfer_contig_end) {
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                        } else if (fsp->xfer_contig_end < offset) {
                                offset = fsp->xfer_contig_end;
                        }
                        fc_fcp_srr(fsp, r_ctl, offset);
                }
        }
unlock_out:
        fc_fcp_unlock_pkt(fsp);
out:
        fc_fcp_pkt_release(fsp);        /* drop hold for outstanding REC */
        fc_frame_free(fp);
}
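/*
 * Summary sketch (illustrative, not part of the original file) of how
 * fc_fcp_rec_resp() above picks the SRR r_ctl from the REC LS_ACC:
 *
 *      exchange state          direction       r_ctl requested
 *      --------------          ---------       ---------------
 *      complete                write           CMD_STATUS (lost response)
 *      complete                read, no gap    CMD_STATUS
 *      complete                read, gap       SOL_DATA from gap start
 *      incomplete, we hold     write, short    DATA_DESC (repeat xfer-rdy)
 *        seq. initiative       otherwise       CMD_STATUS or SOL_DATA
 */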
/*
 * Handle error response or timeout for REC exchange.
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
        int error = PTR_ERR(fp);

        if (fc_fcp_lock_pkt(fsp))
                goto out;

        switch (error) {
        case -FC_EX_CLOSED:
                fc_fcp_retry_cmd(fsp);
                break;

        default:
                FC_FCP_DBG(fsp, "REC %p fid %x unexpected error %d\n",
                           fsp, fsp->rport->port_id, error);
                fsp->status_code = FC_CMD_PLOGO;
                /* fall through */

        case -FC_EX_TIMEOUT:
                /*
                 * Assume REC or LS_ACC was lost.
                 * The exchange manager will have aborted REC, so retry.
                 */
                FC_FCP_DBG(fsp, "REC fid %x error %d retry %d/%d\n",
                           fsp->rport->port_id, error, fsp->recov_retry,
                           FC_MAX_RECOV_RETRY);
                if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
                        fc_fcp_rec(fsp);
                else
                        fc_timeout_error(fsp);
                break;
        }
        fc_fcp_unlock_pkt(fsp);
out:
        fc_fcp_pkt_release(fsp);        /* drop hold for outstanding REC */
}

/*
 * Timeout error routine:
 * aborts the I/O, closes the exchange and
 * sends a completion notification to the scsi layer.
 */
static void fc_timeout_error(struct fc_fcp_pkt *fsp)
{
        fsp->status_code = FC_CMD_TIME_OUT;
        fsp->cdb_status = 0;
        fsp->io_status = 0;
        /*
         * if this fails then we let the scsi command timer fire and
         * scsi-ml escalate.
         */
        fc_fcp_send_abort(fsp);
}
  1356. /*
  1357. * Sequence retransmission request.
  1358. * This is called after receiving status but insufficient data, or
  1359. * when expecting status but the request has timed out.
  1360. */
  1361. static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
  1362. {
  1363. struct fc_lport *lp = fsp->lp;
  1364. struct fc_rport *rport;
  1365. struct fc_rport_libfc_priv *rp;
  1366. struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
  1367. struct fc_seq *seq;
  1368. struct fcp_srr *srr;
  1369. struct fc_frame *fp;
  1370. u8 cdb_op;
  1371. rport = fsp->rport;
  1372. rp = rport->dd_data;
  1373. cdb_op = fsp->cdb_cmd.fc_cdb[0];
  1374. if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
  1375. goto retry; /* shouldn't happen */
  1376. fp = fc_frame_alloc(lp, sizeof(*srr));
  1377. if (!fp)
  1378. goto retry;
  1379. srr = fc_frame_payload_get(fp, sizeof(*srr));
  1380. memset(srr, 0, sizeof(*srr));
  1381. srr->srr_op = ELS_SRR;
  1382. srr->srr_ox_id = htons(ep->oxid);
  1383. srr->srr_rx_id = htons(ep->rxid);
  1384. srr->srr_r_ctl = r_ctl;
  1385. srr->srr_rel_off = htonl(offset);
  1386. fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
  1387. fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
  1388. FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
  1389. seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
  1390. fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
  1391. if (!seq) {
  1392. fc_frame_free(fp);
  1393. goto retry;
  1394. }
  1395. fsp->recov_seq = seq;
  1396. fsp->xfer_len = offset;
  1397. fsp->xfer_contig_end = offset;
  1398. fsp->state &= ~FC_SRB_RCV_STATUS;
  1399. fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
  1400. return;
  1401. retry:
  1402. fc_fcp_retry_cmd(fsp);
  1403. }

/*
 * Handle response from SRR.
 */
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		fc_fcp_srr_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fh = fc_frame_header_get(fp);
	/*
	 * BUG? fc_fcp_srr_error calls exch_done which would release
	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
	 * then fc_exch_timeout would be sending an abort. The exch_done
	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
	 * an abort response though.
	 */
	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_unlock_pkt(fsp);
		return;
	}

	fsp->recov_seq = NULL;
	switch (fc_frame_payload_op(fp)) {
	case ELS_LS_ACC:
		fsp->recov_retry = 0;
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		break;
	case ELS_LS_RJT:
	default:
		fc_timeout_error(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
	fsp->lp->tt.exch_done(seq);
out:
	fc_frame_free(fp);
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
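
/*
 * Handle error or timeout for the outstanding SRR: complete the
 * recovery exchange, then retry via REC on a timeout or requeue
 * the command if the exchange was closed (e.g., link failure).
 */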
static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->lp->tt.exch_done(fsp->recov_seq);
	fsp->recov_seq = NULL;
	switch (PTR_ERR(fp)) {
	case -FC_EX_TIMEOUT:
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_timeout_error(fsp);
		break;
	case -FC_EX_CLOSED:			/* e.g., link failure */
		/* fall through */
	default:
		fc_fcp_retry_cmd(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}
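
/*
 * Check that the local port is ready to accept new I/O:
 * logged in (LPORT_ST_READY), link up, and not queue-full.
 */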
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
{
	/* lock ? */
	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
}

/**
 * fc_queuecommand - The queuecommand function of the scsi template
 * @sc_cmd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when sc_cmd is completed
 *
 * This is the I/O strategy routine, called by the scsi layer.
 * It is called with the host_lock held.
 */
int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_fcp_pkt *fsp;
	struct fc_rport_libfc_priv *rp;
	int rval;
	int rc = 0;
	struct fcoe_dev_stats *stats;

	lp = shost_priv(sc_cmd->device->host);

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		done(sc_cmd);
		goto out;
	}

	if (!*(struct fc_remote_port **)rport->dd_data) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
		sc_cmd->result = DID_IMM_RETRY << 16;
		done(sc_cmd);
		goto out;
	}

	rp = rport->dd_data;

	if (!fc_fcp_lport_queue_ready(lp)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
	if (fsp == NULL) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * build the libfc request pkt
	 */
	fsp->cmd = sc_cmd;	/* save the cmd */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */
	sc_cmd->scsi_done = done;

	/*
	 * set up the transfer length
	 */
	fsp->data_len = scsi_bufflen(sc_cmd);
	fsp->xfer_len = 0;

	/*
	 * setup the data direction
	 */
	stats = fc_lport_get_stats(lp);
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		fsp->req_flags = FC_SRB_READ;
		stats->InputRequests++;
		stats->InputMegabytes += fsp->data_len;	/* accumulate, don't overwrite */
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		fsp->req_flags = FC_SRB_WRITE;
		stats->OutputRequests++;
		stats->OutputMegabytes += fsp->data_len;
	} else {
		fsp->req_flags = 0;
		stats->ControlRequests++;
	}

	fsp->tgt_flags = rp->flags;

	init_timer(&fsp->timer);
	fsp->timer.data = (unsigned long)fsp;

	/*
	 * send it to the lower layer.
	 * if we get a nonzero return, release the packet and ask
	 * scsi-ml to requeue the command (host busy).
	 */
	rval = fc_fcp_pkt_send(lp, fsp);
	if (rval != 0) {
		fsp->state = FC_SRB_FREE;
		fc_fcp_pkt_release(fsp);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
out:
	return rc;
}
EXPORT_SYMBOL(fc_queuecommand);

/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp: scsi packet
 *
 * Translates the fcp packet status into a Linux SCSI result.
 *
 * The fcp packet lock must be held when calling.
 */
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si;
	struct scsi_cmnd *sc_cmd;
	struct fc_lport *lp;
	unsigned long flags;

	/* release outstanding ddp context */
	fc_fcp_ddp_done(fsp);

	fsp->state |= FC_SRB_COMPL;
	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		del_timer_sync(&fsp->timer);
		spin_lock_bh(&fsp->scsi_pkt_lock);
	}

	lp = fsp->lp;
	si = fc_get_scsi_internal(lp);
	spin_lock_irqsave(lp->host->host_lock, flags);
	if (!fsp->cmd) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	/*
	 * if a command timed out while we had to try and throttle IO
	 * and it is now getting cleaned up, then we are about to
	 * try again so clear the throttled flag in case we get more
	 * time outs.
	 */
	if (si->throttled && fsp->state & FC_SRB_NOMEM)
		si->throttled = 0;

	sc_cmd = fsp->cmd;
	fsp->cmd = NULL;

	if (!sc_cmd->SCp.ptr) {
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return;
	}

	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
	switch (fsp->status_code) {
	case FC_COMPLETE:
		if (fsp->cdb_status == 0) {
			/*
			 * good I/O status
			 */
			sc_cmd->result = DID_OK << 16;
			if (fsp->scsi_resid)
				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
		} else if (fsp->cdb_status == QUEUE_FULL) {
			struct scsi_device *tmp_sdev;
			struct scsi_device *sdev = sc_cmd->device;

			shost_for_each_device(tmp_sdev, sdev->host) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->queue_depth > 1) {
					scsi_track_queue_full(tmp_sdev,
							      tmp_sdev->queue_depth - 1);
				}
			}
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		} else {
			/*
			 * transport level I/O was ok but scsi
			 * has non zero status
			 */
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		}
		break;
	case FC_ERROR:
		sc_cmd->result = DID_ERROR << 16;
		break;
	case FC_DATA_UNDRUN:
		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
			/*
			 * scsi status is good but transport level
			 * underrun.
			 */
			sc_cmd->result = DID_OK << 16;
		} else {
			/*
			 * scsi got underrun, this is an error
			 */
			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		}
		break;
	case FC_DATA_OVRRUN:
		/*
		 * overrun is an error
		 */
		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		break;
	case FC_CMD_ABORTED:
		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
		break;
	case FC_CMD_TIME_OUT:
		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	case FC_HRD_ERROR:
		sc_cmd->result = (DID_NO_CONNECT << 16);
		break;
	default:
		sc_cmd->result = (DID_ERROR << 16);
		break;
	}

	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	/* release ref from initial allocation in queue command */
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_complete() - complete processing of a fcp packet
 * @fsp: fcp packet
 *
 * This function may sleep if a fsp timer is pending.
 * The host lock must not be held by caller.
 */
void fc_fcp_complete(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))
		return;

	fc_fcp_complete_locked(fsp);
	fc_fcp_unlock_pkt(fsp);
}
EXPORT_SYMBOL(fc_fcp_complete);

/**
 * fc_eh_abort() - Abort a command
 * @sc_cmd: scsi command to abort
 *
 * From the scsi host template: send an ABTS to the target device
 * and wait for the response. sc_cmd points to the command to be
 * aborted.
 */
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_fcp_pkt *fsp;
	struct fc_lport *lp;
	int rc = FAILED;
	unsigned long flags;

	lp = shost_priv(sc_cmd->device->host);
	if (lp->state != LPORT_ST_READY)
		return rc;
	else if (!lp->link_up)
		return rc;

	spin_lock_irqsave(lp->host->host_lock, flags);
	fsp = CMD_SP(sc_cmd);
	if (!fsp) {
		/* command completed while scsi eh was setting up */
		spin_unlock_irqrestore(lp->host->host_lock, flags);
		return SUCCESS;
	}
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
	fc_fcp_pkt_hold(fsp);
	spin_unlock_irqrestore(lp->host->host_lock, flags);

	if (fc_fcp_lock_pkt(fsp)) {
		/* completed while we were waiting for timer to be deleted */
		rc = SUCCESS;
		goto release_pkt;
	}

	rc = fc_fcp_pkt_abort(lp, fsp);
	fc_fcp_unlock_pkt(fsp);

release_pkt:
	fc_fcp_pkt_release(fsp);
	return rc;
}
EXPORT_SYMBOL(fc_eh_abort);

/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd: scsi command
 *
 * From the scsi host template: send a LUN reset tm cmd to the target
 * and wait for the response.
 */
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lp;
	struct fc_fcp_pkt *fsp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	int rc = FAILED;
	struct fc_rport_libfc_priv *rp;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval)
		goto out;

	rp = rport->dd_data;
	lp = shost_priv(sc_cmd->device->host);

	if (lp->state != LPORT_ST_READY)
		return rc;

	FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);

	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
	if (fsp == NULL) {
		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
		sc_cmd->result = DID_NO_CONNECT << 16;
		goto out;
	}

	/*
	 * Build the libfc request pkt. Do not set the scsi cmnd, because
	 * the sc passed in is not setup for execution like when sent
	 * through the queuecommand callout.
	 */
	fsp->lp = lp;		/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */

	/*
	 * flush outstanding commands
	 */
	rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
	fsp->state = FC_SRB_FREE;
	fc_fcp_pkt_release(fsp);

out:
	return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);

/**
 * fc_eh_host_reset() - The reset function will reset the ports on the host.
 * @sc_cmd: scsi command
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lp = shost_priv(shost);
	unsigned long wait_tmo;

	FC_SCSI_DBG(lp, "Resetting host\n");

	lp->tt.lport_reset(lp);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lp)) {
		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
			     "on port (%6x)\n", fc_host_port_id(lp->host));
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
			     "port (%6x) is not ready.\n",
			     fc_host_port_id(lp->host));
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);

/**
 * fc_slave_alloc() - configure queue depth
 * @sdev: scsi device
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	int queue_depth;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported) {
		if (sdev->host->hostt->cmd_per_lun)
			queue_depth = sdev->host->hostt->cmd_per_lun;
		else
			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
		scsi_activate_tcq(sdev, queue_depth);
	}
	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
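
/**
 * fc_change_queue_depth() - Change a scsi device's queue depth
 * @sdev: scsi device
 * @qdepth: requested new queue depth
 *
 * From the scsi host template; returns the queue depth in effect.
 */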
int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);
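
/**
 * fc_change_queue_type() - Change a scsi device's tag type
 * @sdev: scsi device
 * @tag_type: requested new tag type
 *
 * From the scsi host template. Activates or deactivates tagged
 * command queueing as needed and returns the tag type in effect.
 */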
int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);
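
/**
 * fc_fcp_destroy() - Tear down the FCP layer for a local port
 * @lp: fc lport
 *
 * Warns if scsi packets are still outstanding, then frees the
 * packet mempool and the per-port FCP private data.
 */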
void fc_fcp_destroy(struct fc_lport *lp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
		       "port (%6x)\n", fc_host_port_id(lp->host));

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lp->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);
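
/**
 * fc_fcp_init() - Initialize the FCP layer for a local port
 * @lp: fc lport
 *
 * Fills in any FCP template entry points the driver did not
 * override and allocates the per-port scsi packet mempool.
 */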
int fc_fcp_init(struct fc_lport *lp)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lp->tt.fcp_cmd_send)
		lp->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lp->tt.fcp_cleanup)
		lp->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lp->tt.fcp_abort_io)
		lp->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lp->scsi_priv = si;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
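
/*
 * A minimal sketch of how a lower-level driver might wire the
 * exports above into its scsi_host_template (illustrative only;
 * the template name and any omitted fields are assumptions, not
 * taken from this file):
 *
 *	static struct scsi_host_template example_sht = {
 *		.queuecommand		 = fc_queuecommand,
 *		.eh_abort_handler	 = fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	 = fc_eh_host_reset,
 *		.slave_alloc		 = fc_slave_alloc,
 *		.change_queue_depth	 = fc_change_queue_depth,
 *		.change_queue_type	 = fc_change_queue_type,
 *	};
 *
 * The driver would then call fc_fcp_init() on each fc_lport it
 * creates and fc_fcp_destroy() when tearing it down.
 */

/*
 * libfc_init() - module init: create the scsi packet cache and set
 * up the exchange manager and remote port support.
 */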
static int __init libfc_init(void)
{
	int rc;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (scsi_pkt_cachep == NULL) {
		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
		       "module load failed!\n");
		return -ENOMEM;
	}

	rc = fc_setup_exch_mgr();
	if (rc)
		goto destroy_pkt_cache;

	rc = fc_setup_rport();
	if (rc)
		goto destroy_em;

	return rc;

destroy_em:
	fc_destroy_exch_mgr();
destroy_pkt_cache:
	kmem_cache_destroy(scsi_pkt_cachep);
	return rc;
}
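
/*
 * libfc_exit() - module exit: free the scsi packet cache and tear
 * down the exchange manager and remote port support.
 */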
static void __exit libfc_exit(void)
{
	kmem_cache_destroy(scsi_pkt_cachep);
	fc_destroy_exch_mgr();
	fc_destroy_rport();
}

module_init(libfc_init);
module_exit(libfc_exit);