iscsi_tcp.c

  1. /*
  2. * iSCSI Initiator over TCP/IP Data-Path
  3. *
  4. * Copyright (C) 2004 Dmitry Yusupov
  5. * Copyright (C) 2004 Alex Aizman
  6. * Copyright (C) 2005 - 2006 Mike Christie
  7. * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
  8. * maintained by open-iscsi@googlegroups.com
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published
  12. * by the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * See the file COPYING included with this distribution for more details.
  21. *
  22. * Credits:
  23. * Christoph Hellwig
  24. * FUJITA Tomonori
  25. * Arne Redlich
  26. * Zhenyu Wang
  27. */
  28. #include <linux/types.h>
  29. #include <linux/list.h>
  30. #include <linux/inet.h>
  31. #include <linux/file.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/crypto.h>
  34. #include <linux/delay.h>
  35. #include <linux/kfifo.h>
  36. #include <linux/scatterlist.h>
  37. #include <net/tcp.h>
  38. #include <scsi/scsi_cmnd.h>
  39. #include <scsi/scsi_device.h>
  40. #include <scsi/scsi_host.h>
  41. #include <scsi/scsi.h>
  42. #include <scsi/scsi_transport_iscsi.h>
  43. #include "iscsi_tcp.h"
  44. MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
  45. "Alex Aizman <itn780@yahoo.com>");
  46. MODULE_DESCRIPTION("iSCSI/TCP data-path");
  47. MODULE_LICENSE("GPL");
  48. #undef DEBUG_TCP
  49. #define DEBUG_ASSERT
  50. #ifdef DEBUG_TCP
  51. #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
  52. #else
  53. #define debug_tcp(fmt...)
  54. #endif
  55. #ifndef DEBUG_ASSERT
  56. #ifdef BUG_ON
  57. #undef BUG_ON
  58. #endif
  59. #define BUG_ON(expr)
  60. #endif
  61. static unsigned int iscsi_max_lun = 512;
  62. module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
  63. static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  64. struct iscsi_segment *segment);
  65. /*
  66. * Scatterlist handling: inside the iscsi_segment, we
  67. * remember an index into the scatterlist, and set data/size
  68. * to the current scatterlist entry. For highmem pages, we
  69. * kmap as needed.
  70. *
  71. * Note that the page is unmapped when we return from
  72. * TCP's data_ready handler, so we may end up mapping and
  73. * unmapping the same page repeatedly. The whole reason
  74. * for this is that we shouldn't keep the page mapped
  75. * outside the softirq.
  76. */
  77. /**
  78. * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
  79. * @segment: the buffer object
  80. * @sg: scatterlist
  81. * @offset: byte offset into that sg entry
  82. *
  83. * This function sets up the segment so that subsequent
  84. * data is copied to the indicated sg entry, at the given
  85. * offset.
  86. */
  87. static inline void
  88. iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
  89. struct scatterlist *sg, unsigned int offset)
  90. {
  91. segment->sg = sg;
  92. segment->sg_offset = offset;
  93. segment->size = min(sg->length - offset,
  94. segment->total_size - segment->total_copied);
  95. segment->data = NULL;
  96. }
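/*
 * Illustrative example (hypothetical numbers): segment->size ends up as
 * the smaller of what is left in this sg entry and what is left in the
 * whole transfer.  With a 4096-byte sg entry, offset 1024 and only 2048
 * bytes still outstanding overall, the segment is set up to copy
 * min(4096 - 1024, 2048) = 2048 bytes into this entry.
 */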
  97. /**
  98. * iscsi_tcp_segment_map - map the current S/G page
  99. * @segment: iscsi_segment
  100. * @recv: 1 if called from recv path
  101. *
  102. * We only need to possibly kmap data if scatter lists are being used,
  103. * because the iscsi passthrough and internal IO paths will never use high
  104. * mem pages.
  105. */
  106. static inline void
  107. iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
  108. {
  109. struct scatterlist *sg;
  110. if (segment->data != NULL || !segment->sg)
  111. return;
  112. sg = segment->sg;
  113. BUG_ON(segment->sg_mapped);
  114. BUG_ON(sg->length == 0);
  115. /*
  116. * If the page count is greater than one it is ok to send
  117. * to the network layer's zero copy send path. If not we
  118. * have to go the slow sendmsg path. We always map for the
  119. * recv path.
  120. */
  121. if (page_count(sg_page(sg)) >= 1 && !recv)
  122. return;
  123. debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
  124. segment);
  125. segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
  126. segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
  127. }
  128. static inline void
  129. iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
  130. {
  131. debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
  132. if (segment->sg_mapped) {
  133. debug_tcp("iscsi_tcp_segment_unmap valid\n");
  134. kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
  135. segment->sg_mapped = NULL;
  136. segment->data = NULL;
  137. }
  138. }
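/*
 * Note: the kmap_atomic() mapping set up in iscsi_tcp_segment_map() must
 * not outlive the softirq that created it, which is why
 * iscsi_tcp_data_ready() below also calls iscsi_tcp_segment_unmap()
 * once tcp_read_sock() has returned.
 */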
  139. /*
  140. * Splice the digest buffer into the buffer
  141. */
  142. static inline void
  143. iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
  144. {
  145. segment->data = digest;
  146. segment->digest_len = ISCSI_DIGEST_SIZE;
  147. segment->total_size += ISCSI_DIGEST_SIZE;
  148. segment->size = ISCSI_DIGEST_SIZE;
  149. segment->copied = 0;
  150. segment->sg = NULL;
  151. segment->hash = NULL;
  152. }
  153. /**
  154. * iscsi_tcp_segment_done - check whether the segment is complete
  155. * @segment: iscsi segment to check
  156. * @recv: set to one if this is called from the recv path
  157. * @copied: number of bytes copied
  158. *
  159. * Check if we're done receiving this segment. If the receive
  160. * buffer is full but we expect more data, move on to the
  161. * next entry in the scatterlist.
  162. *
  163. * If the amount of data we received isn't a multiple of 4,
  164. * we will transparently receive the pad bytes, too.
  165. *
  166. * This function must be re-entrant.
  167. */
  168. static inline int
  169. iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
  170. {
  171. static unsigned char padbuf[ISCSI_PAD_LEN];
  172. struct scatterlist sg;
  173. unsigned int pad;
  174. debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
  175. segment->size, recv ? "recv" : "xmit");
  176. if (segment->hash && copied) {
  177. /*
  178. * If a segment is kmap'd we must unmap it before sending
  179. * to the crypto layer since that will try to kmap it again.
  180. */
  181. iscsi_tcp_segment_unmap(segment);
  182. if (!segment->data) {
  183. sg_init_table(&sg, 1);
  184. sg_set_page(&sg, sg_page(segment->sg), copied,
  185. segment->copied + segment->sg_offset +
  186. segment->sg->offset);
  187. } else
  188. sg_init_one(&sg, segment->data + segment->copied,
  189. copied);
  190. crypto_hash_update(segment->hash, &sg, copied);
  191. }
  192. segment->copied += copied;
  193. if (segment->copied < segment->size) {
  194. iscsi_tcp_segment_map(segment, recv);
  195. return 0;
  196. }
  197. segment->total_copied += segment->copied;
  198. segment->copied = 0;
  199. segment->size = 0;
  200. /* Unmap the current scatterlist page, if there is one. */
  201. iscsi_tcp_segment_unmap(segment);
  202. /* Do we have more scatterlist entries? */
  203. debug_tcp("total copied %u total size %u\n", segment->total_copied,
  204. segment->total_size);
  205. if (segment->total_copied < segment->total_size) {
  206. /* Proceed to the next entry in the scatterlist. */
  207. iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
  208. 0);
  209. iscsi_tcp_segment_map(segment, recv);
  210. BUG_ON(segment->size == 0);
  211. return 0;
  212. }
  213. /* Do we need to handle padding? */
  214. pad = iscsi_padding(segment->total_copied);
  215. if (pad != 0) {
  216. debug_tcp("consume %d pad bytes\n", pad);
  217. segment->total_size += pad;
  218. segment->size = pad;
  219. segment->data = padbuf;
  220. return 0;
  221. }
  222. /*
  223. * Set us up for transferring the data digest. hdr digest
  224. * is completely handled in hdr done function.
  225. */
  226. if (segment->hash) {
  227. crypto_hash_final(segment->hash, segment->digest);
  228. iscsi_tcp_segment_splice_digest(segment,
  229. recv ? segment->recv_digest : segment->digest);
  230. return 0;
  231. }
  232. return 1;
  233. }
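/*
 * Illustrative walk-through (hypothetical numbers): for a 10-byte
 * Data-In payload with data digests enabled, iscsi_tcp_segment_done()
 * is re-entered until the segment has consumed, in order:
 *
 *	1. the 10 payload bytes (total_copied reaches total_size),
 *	2. iscsi_padding(10) = 2 pad bytes,
 *	3. the 4-byte CRC32C data digest,
 *
 * and only then does it return 1 to signal "segment complete".
 */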
  234. /**
  235. * iscsi_tcp_xmit_segment - transmit segment
  236. * @tcp_conn: the iSCSI TCP connection
  237. * @segment: the buffer to transmit
  238. *
  239. * This function transmits as much of the buffer as
  240. * the network layer will accept, and returns the number of
  241. * bytes transmitted.
  242. *
  243. * If CRC hashing is enabled, the function will compute the
  244. * hash as it goes. When the entire segment has been transmitted,
  245. * it will retrieve the hash value and send it as well.
  246. */
  247. static int
  248. iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
  249. struct iscsi_segment *segment)
  250. {
  251. struct socket *sk = tcp_conn->sock;
  252. unsigned int copied = 0;
  253. int r = 0;
  254. while (!iscsi_tcp_segment_done(segment, 0, r)) {
  255. struct scatterlist *sg;
  256. unsigned int offset, copy;
  257. int flags = 0;
  258. r = 0;
  259. offset = segment->copied;
  260. copy = segment->size - offset;
  261. if (segment->total_copied + segment->size < segment->total_size)
  262. flags |= MSG_MORE;
  263. /* Use sendpage if we can; else fall back to sendmsg */
  264. if (!segment->data) {
  265. sg = segment->sg;
  266. offset += segment->sg_offset + sg->offset;
  267. r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
  268. flags);
  269. } else {
  270. struct msghdr msg = { .msg_flags = flags };
  271. struct kvec iov = {
  272. .iov_base = segment->data + offset,
  273. .iov_len = copy
  274. };
  275. r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
  276. }
  277. if (r < 0) {
  278. iscsi_tcp_segment_unmap(segment);
  279. if (copied || r == -EAGAIN)
  280. break;
  281. return r;
  282. }
  283. copied += r;
  284. }
  285. return copied;
  286. }
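/*
 * Note: scatterlist-backed data goes out through tcp_conn->sendpage()
 * (the zero-copy path), while linear data such as headers and digests
 * (segment->data != NULL) falls back to kernel_sendmsg().  MSG_MORE is
 * set while more of the segment still follows the current chunk, hinting
 * to TCP that the PDU is not finished yet.
 */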
  287. /**
  288. * iscsi_tcp_segment_recv - copy data to segment
  289. * @tcp_conn: the iSCSI TCP connection
  290. * @segment: the buffer to copy to
  291. * @ptr: data pointer
  292. * @len: amount of data available
  293. *
  294. * This function copies up to @len bytes to the
  295. * given buffer, and returns the number of bytes
  296. * consumed, which can actually be less than @len.
  297. *
  298. * If hash digest is enabled, the function will update the
  299. * hash while copying.
  300. * Combining these two operations doesn't buy us a lot (yet),
  301. * but in the future we could implement combined copy+crc,
  302. * just the way we do for network layer checksums.
  303. */
  304. static int
  305. iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
  306. struct iscsi_segment *segment, const void *ptr,
  307. unsigned int len)
  308. {
  309. unsigned int copy = 0, copied = 0;
  310. while (!iscsi_tcp_segment_done(segment, 1, copy)) {
  311. if (copied == len) {
  312. debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
  313. len);
  314. break;
  315. }
  316. copy = min(len - copied, segment->size - segment->copied);
  317. debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
  318. memcpy(segment->data + segment->copied, ptr + copied, copy);
  319. copied += copy;
  320. }
  321. return copied;
  322. }
  323. static inline void
  324. iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
  325. unsigned char digest[ISCSI_DIGEST_SIZE])
  326. {
  327. struct scatterlist sg;
  328. sg_init_one(&sg, hdr, hdrlen);
  329. crypto_hash_digest(hash, &sg, hdrlen, digest);
  330. }
  331. static inline int
  332. iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  333. struct iscsi_segment *segment)
  334. {
  335. if (!segment->digest_len)
  336. return 1;
  337. if (memcmp(segment->recv_digest, segment->digest,
  338. segment->digest_len)) {
  339. debug_scsi("digest mismatch\n");
  340. return 0;
  341. }
  342. return 1;
  343. }
  344. /*
  345. * Helper function to set up segment buffer
  346. */
  347. static inline void
  348. __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
  349. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  350. {
  351. memset(segment, 0, sizeof(*segment));
  352. segment->total_size = size;
  353. segment->done = done;
  354. if (hash) {
  355. segment->hash = hash;
  356. crypto_hash_init(hash);
  357. }
  358. }
  359. static inline void
  360. iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
  361. size_t size, iscsi_segment_done_fn_t *done,
  362. struct hash_desc *hash)
  363. {
  364. __iscsi_segment_init(segment, size, done, hash);
  365. segment->data = data;
  366. segment->size = size;
  367. }
  368. static inline int
  369. iscsi_segment_seek_sg(struct iscsi_segment *segment,
  370. struct scatterlist *sg_list, unsigned int sg_count,
  371. unsigned int offset, size_t size,
  372. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  373. {
  374. struct scatterlist *sg;
  375. unsigned int i;
  376. debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
  377. offset, size);
  378. __iscsi_segment_init(segment, size, done, hash);
  379. for_each_sg(sg_list, sg, sg_count, i) {
  380. debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
  381. sg->offset);
  382. if (offset < sg->length) {
  383. iscsi_tcp_segment_init_sg(segment, sg, offset);
  384. return 0;
  385. }
  386. offset -= sg->length;
  387. }
  388. return ISCSI_ERR_DATA_OFFSET;
  389. }
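/*
 * Illustrative example (hypothetical numbers): with three 4096-byte sg
 * entries and offset = 9216, the loop above skips the first two entries
 * (9216 - 4096 - 4096 = 1024) and initializes the segment 1024 bytes
 * into the third entry.
 */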
  390. /**
  391. * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
  392. * @tcp_conn: iscsi connection to prep for
  393. *
  394. * This function always passes NULL for the hash argument, because when this
  395. * function is called we do not yet know the final size of the header and want
  396. * to delay the digest processing until we know that.
  397. */
  398. static void
  399. iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  400. {
  401. debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
  402. tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
  403. iscsi_segment_init_linear(&tcp_conn->in.segment,
  404. tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
  405. iscsi_tcp_hdr_recv_done, NULL);
  406. }
  407. /*
  408. * Handle incoming reply to any other type of command
  409. */
  410. static int
  411. iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
  412. struct iscsi_segment *segment)
  413. {
  414. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  415. int rc = 0;
  416. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  417. return ISCSI_ERR_DATA_DGST;
  418. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
  419. conn->data, tcp_conn->in.datalen);
  420. if (rc)
  421. return rc;
  422. iscsi_tcp_hdr_recv_prep(tcp_conn);
  423. return 0;
  424. }
  425. static void
  426. iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  427. {
  428. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  429. struct hash_desc *rx_hash = NULL;
  430. if (conn->datadgst_en)
  431. rx_hash = &tcp_conn->rx_hash;
  432. iscsi_segment_init_linear(&tcp_conn->in.segment,
  433. conn->data, tcp_conn->in.datalen,
  434. iscsi_tcp_data_recv_done, rx_hash);
  435. }
  436. /*
  437. * must be called with session lock
  438. */
  439. static void
  440. iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  441. {
  442. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  443. struct iscsi_r2t_info *r2t;
  444. /* flush ctask's r2t queues */
  445. while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
  446. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  447. sizeof(void*));
  448. debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
  449. }
  450. r2t = tcp_ctask->r2t;
  451. if (r2t != NULL) {
  452. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  453. sizeof(void*));
  454. tcp_ctask->r2t = NULL;
  455. }
  456. }
  457. /**
  458. * iscsi_data_rsp - SCSI Data-In Response processing
  459. * @conn: iscsi connection
  460. * @ctask: scsi command task
  461. **/
  462. static int
  463. iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  464. {
  465. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  466. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  467. struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
  468. struct iscsi_session *session = conn->session;
  469. struct scsi_cmnd *sc = ctask->sc;
  470. int datasn = be32_to_cpu(rhdr->datasn);
  471. iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
  472. if (tcp_conn->in.datalen == 0)
  473. return 0;
  474. if (tcp_ctask->exp_datasn != datasn) {
  475. debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
  476. __FUNCTION__, tcp_ctask->exp_datasn, datasn);
  477. return ISCSI_ERR_DATASN;
  478. }
  479. tcp_ctask->exp_datasn++;
  480. tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
  481. if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
  482. debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
  483. __FUNCTION__, tcp_ctask->data_offset,
  484. tcp_conn->in.datalen, scsi_bufflen(sc));
  485. return ISCSI_ERR_DATA_OFFSET;
  486. }
  487. if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
  488. sc->result = (DID_OK << 16) | rhdr->cmd_status;
  489. conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
  490. if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
  491. ISCSI_FLAG_DATA_OVERFLOW)) {
  492. int res_count = be32_to_cpu(rhdr->residual_count);
  493. if (res_count > 0 &&
  494. (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
  495. res_count <= scsi_bufflen(sc)))
  496. scsi_set_resid(sc, res_count);
  497. else
  498. sc->result = (DID_BAD_TARGET << 16) |
  499. rhdr->cmd_status;
  500. }
  501. }
  502. conn->datain_pdus_cnt++;
  503. return 0;
  504. }
  505. /**
  506. * iscsi_solicit_data_init - initialize first Data-Out
  507. * @conn: iscsi connection
  508. * @ctask: scsi command task
  509. * @r2t: R2T info
  510. *
  511. * Notes:
  512. * Initialize first Data-Out within this R2T sequence and find
  513. * proper data_offset within this SCSI command.
  514. *
  515. * This function is called with connection lock taken.
  516. **/
  517. static void
  518. iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
  519. struct iscsi_r2t_info *r2t)
  520. {
  521. struct iscsi_data *hdr;
  522. hdr = &r2t->dtask.hdr;
  523. memset(hdr, 0, sizeof(struct iscsi_data));
  524. hdr->ttt = r2t->ttt;
  525. hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
  526. r2t->solicit_datasn++;
  527. hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
  528. memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
  529. hdr->itt = ctask->hdr->itt;
  530. hdr->exp_statsn = r2t->exp_statsn;
  531. hdr->offset = cpu_to_be32(r2t->data_offset);
  532. if (r2t->data_length > conn->max_xmit_dlength) {
  533. hton24(hdr->dlength, conn->max_xmit_dlength);
  534. r2t->data_count = conn->max_xmit_dlength;
  535. hdr->flags = 0;
  536. } else {
  537. hton24(hdr->dlength, r2t->data_length);
  538. r2t->data_count = r2t->data_length;
  539. hdr->flags = ISCSI_FLAG_CMD_FINAL;
  540. }
  541. conn->dataout_pdus_cnt++;
  542. r2t->sent = 0;
  543. }
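/*
 * Illustrative example (hypothetical numbers): for an R2T requesting
 * 128k of data with a max_xmit_dlength of 64k, the first Data-Out
 * prepared above carries 64k and leaves ISCSI_FLAG_CMD_FINAL clear; the
 * remainder is sent by iscsi_solicit_data_cont(), which sets the final
 * flag on the last Data-Out of the sequence.
 */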
  544. /**
  545. * iscsi_r2t_rsp - iSCSI R2T Response processing
  546. * @conn: iscsi connection
  547. * @ctask: scsi command task
  548. **/
  549. static int
  550. iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  551. {
  552. struct iscsi_r2t_info *r2t;
  553. struct iscsi_session *session = conn->session;
  554. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  555. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  556. struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
  557. int r2tsn = be32_to_cpu(rhdr->r2tsn);
  558. int rc;
  559. if (tcp_conn->in.datalen) {
  560. printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
  561. tcp_conn->in.datalen);
  562. return ISCSI_ERR_DATALEN;
  563. }
  564. if (tcp_ctask->exp_datasn != r2tsn){
  565. debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
  566. __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
  567. return ISCSI_ERR_R2TSN;
  568. }
  569. /* fill-in new R2T associated with the task */
  570. iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
  571. if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
  572. printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
  573. "recovery...\n", ctask->itt);
  574. return 0;
  575. }
  576. rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
  577. BUG_ON(!rc);
  578. r2t->exp_statsn = rhdr->statsn;
  579. r2t->data_length = be32_to_cpu(rhdr->data_length);
  580. if (r2t->data_length == 0) {
  581. printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
  582. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  583. sizeof(void*));
  584. return ISCSI_ERR_DATALEN;
  585. }
  586. if (r2t->data_length > session->max_burst)
  587. debug_scsi("invalid R2T with data len %u and max burst %u."
  588. "Attempting to execute request.\n",
  589. r2t->data_length, session->max_burst);
  590. r2t->data_offset = be32_to_cpu(rhdr->data_offset);
  591. if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
  592. printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
  593. "offset %u and total length %d\n", r2t->data_length,
  594. r2t->data_offset, scsi_bufflen(ctask->sc));
  595. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  596. sizeof(void*));
  597. return ISCSI_ERR_DATALEN;
  598. }
  599. r2t->ttt = rhdr->ttt; /* no flip */
  600. r2t->solicit_datasn = 0;
  601. iscsi_solicit_data_init(conn, ctask, r2t);
  602. tcp_ctask->exp_datasn = r2tsn + 1;
  603. __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
  604. conn->r2t_pdus_cnt++;
  605. iscsi_requeue_ctask(ctask);
  606. return 0;
  607. }
  608. /*
  609. * Handle incoming reply to DataIn command
  610. */
  611. static int
  612. iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
  613. struct iscsi_segment *segment)
  614. {
  615. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  616. struct iscsi_hdr *hdr = tcp_conn->in.hdr;
  617. int rc;
  618. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  619. return ISCSI_ERR_DATA_DGST;
  620. /* check for non-exceptional status */
  621. if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
  622. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
  623. if (rc)
  624. return rc;
  625. }
  626. iscsi_tcp_hdr_recv_prep(tcp_conn);
  627. return 0;
  628. }
  629. /**
  630. * iscsi_tcp_hdr_dissect - process PDU header
  631. * @conn: iSCSI connection
  632. * @hdr: PDU header
  633. *
  634. * This function analyzes the header of the PDU received,
  635. * and performs several sanity checks. If the PDU is accompanied
  636. * by data, the receive buffer is set up to copy the incoming data
  637. * to the correct location.
  638. */
  639. static int
  640. iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
  641. {
  642. int rc = 0, opcode, ahslen;
  643. struct iscsi_session *session = conn->session;
  644. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  645. struct iscsi_cmd_task *ctask;
  646. uint32_t itt;
  647. /* verify PDU length */
  648. tcp_conn->in.datalen = ntoh24(hdr->dlength);
  649. if (tcp_conn->in.datalen > conn->max_recv_dlength) {
  650. printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
  651. tcp_conn->in.datalen, conn->max_recv_dlength);
  652. return ISCSI_ERR_DATALEN;
  653. }
  654. /* Additional header segments. So far, we don't
  655. * process additional headers.
  656. */
  657. ahslen = hdr->hlength << 2;
  658. opcode = hdr->opcode & ISCSI_OPCODE_MASK;
  659. /* verify itt (itt encoding: age+cid+itt) */
  660. rc = iscsi_verify_itt(conn, hdr, &itt);
  661. if (rc)
  662. return rc;
  663. debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
  664. opcode, ahslen, tcp_conn->in.datalen);
  665. switch(opcode) {
  666. case ISCSI_OP_SCSI_DATA_IN:
  667. ctask = session->cmds[itt];
  668. spin_lock(&conn->session->lock);
  669. rc = iscsi_data_rsp(conn, ctask);
  670. spin_unlock(&conn->session->lock);
  671. if (rc)
  672. return rc;
  673. if (tcp_conn->in.datalen) {
  674. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  675. struct hash_desc *rx_hash = NULL;
  676. /*
  677. * Setup copy of Data-In into the Scsi_Cmnd
  678. * Scatterlist case:
  679. * We set up the iscsi_segment to point to the next
  680. * scatterlist entry to copy to. As we go along,
  681. * we move on to the next scatterlist entry and
  682. * update the digest per-entry.
  683. */
  684. if (conn->datadgst_en)
  685. rx_hash = &tcp_conn->rx_hash;
  686. debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
  687. "datalen=%d)\n", tcp_conn,
  688. tcp_ctask->data_offset,
  689. tcp_conn->in.datalen);
  690. return iscsi_segment_seek_sg(&tcp_conn->in.segment,
  691. scsi_sglist(ctask->sc),
  692. scsi_sg_count(ctask->sc),
  693. tcp_ctask->data_offset,
  694. tcp_conn->in.datalen,
  695. iscsi_tcp_process_data_in,
  696. rx_hash);
  697. }
  698. /* fall through */
  699. case ISCSI_OP_SCSI_CMD_RSP:
  700. if (tcp_conn->in.datalen) {
  701. iscsi_tcp_data_recv_prep(tcp_conn);
  702. return 0;
  703. }
  704. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  705. break;
  706. case ISCSI_OP_R2T:
  707. ctask = session->cmds[itt];
  708. if (ahslen)
  709. rc = ISCSI_ERR_AHSLEN;
  710. else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
  711. spin_lock(&session->lock);
  712. rc = iscsi_r2t_rsp(conn, ctask);
  713. spin_unlock(&session->lock);
  714. } else
  715. rc = ISCSI_ERR_PROTO;
  716. break;
  717. case ISCSI_OP_LOGIN_RSP:
  718. case ISCSI_OP_TEXT_RSP:
  719. case ISCSI_OP_REJECT:
  720. case ISCSI_OP_ASYNC_EVENT:
  721. /*
  722. * It is possible that we could get a PDU with a buffer larger
  723. * than 8K, but there are no targets that currently do this.
  724. * For now we fail until we find a vendor that needs it
  725. */
  726. if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
  727. printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
  728. "but conn buffer is only %u (opcode %0x)\n",
  729. tcp_conn->in.datalen,
  730. ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
  731. rc = ISCSI_ERR_PROTO;
  732. break;
  733. }
  734. /* If there's data coming in with the response,
  735. * receive it to the connection's buffer.
  736. */
  737. if (tcp_conn->in.datalen) {
  738. iscsi_tcp_data_recv_prep(tcp_conn);
  739. return 0;
  740. }
  741. /* fall through */
  742. case ISCSI_OP_LOGOUT_RSP:
  743. case ISCSI_OP_NOOP_IN:
  744. case ISCSI_OP_SCSI_TMFUNC_RSP:
  745. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  746. break;
  747. default:
  748. rc = ISCSI_ERR_BAD_OPCODE;
  749. break;
  750. }
  751. if (rc == 0) {
  752. /* Anything that comes with data should have
  753. * been handled above. */
  754. if (tcp_conn->in.datalen)
  755. return ISCSI_ERR_PROTO;
  756. iscsi_tcp_hdr_recv_prep(tcp_conn);
  757. }
  758. return rc;
  759. }
  760. /**
  761. * iscsi_tcp_hdr_recv_done - process PDU header
  762. *
  763. * This is the callback invoked when the PDU header has
  764. * been received. If the header is followed by additional
  765. * header segments, we go back for more data.
  766. */
  767. static int
  768. iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  769. struct iscsi_segment *segment)
  770. {
  771. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  772. struct iscsi_hdr *hdr;
  773. /* Check if there are additional header segments
  774. * *prior* to computing the digest, because we
  775. * may need to go back to the caller for more.
  776. */
  777. hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
  778. if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
  779. /* Bump the header length - the caller will
  780. * just loop around and get the AHS for us, and
  781. * call again. */
  782. unsigned int ahslen = hdr->hlength << 2;
  783. /* Make sure we don't overflow */
  784. if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
  785. return ISCSI_ERR_AHSLEN;
  786. segment->total_size += ahslen;
  787. segment->size += ahslen;
  788. return 0;
  789. }
  790. /* We're done processing the header. See if we're doing
  791. * header digests; if so, set up the recv_digest buffer
  792. * and go back for more. */
  793. if (conn->hdrdgst_en) {
  794. if (segment->digest_len == 0) {
  795. iscsi_tcp_segment_splice_digest(segment,
  796. segment->recv_digest);
  797. return 0;
  798. }
  799. iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
  800. segment->total_copied - ISCSI_DIGEST_SIZE,
  801. segment->digest);
  802. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  803. return ISCSI_ERR_HDR_DGST;
  804. }
  805. tcp_conn->in.hdr = hdr;
  806. return iscsi_tcp_hdr_dissect(conn, hdr);
  807. }
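/*
 * Note: this callback may run up to three times for a single PDU -- once
 * for the 48-byte basic header, once more if hlength announced AHS
 * bytes, and once more if header digests are enabled -- and only on the
 * final pass does it hand the header to iscsi_tcp_hdr_dissect().
 */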
  808. /**
  809. * iscsi_tcp_recv - TCP receive in sendfile fashion
  810. * @rd_desc: read descriptor
  811. * @skb: socket buffer
  812. * @offset: offset in skb
  813. * @len: skb->len - offset
  814. **/
  815. static int
  816. iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  817. unsigned int offset, size_t len)
  818. {
  819. struct iscsi_conn *conn = rd_desc->arg.data;
  820. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  821. struct iscsi_segment *segment = &tcp_conn->in.segment;
  822. struct skb_seq_state seq;
  823. unsigned int consumed = 0;
  824. int rc = 0;
  825. debug_tcp("in %d bytes\n", skb->len - offset);
  826. if (unlikely(conn->suspend_rx)) {
  827. debug_tcp("conn %d Rx suspended!\n", conn->id);
  828. return 0;
  829. }
  830. skb_prepare_seq_read(skb, offset, skb->len, &seq);
  831. while (1) {
  832. unsigned int avail;
  833. const u8 *ptr;
  834. avail = skb_seq_read(consumed, &ptr, &seq);
  835. if (avail == 0) {
  836. debug_tcp("no more data avail. Consumed %d\n",
  837. consumed);
  838. break;
  839. }
  840. BUG_ON(segment->copied >= segment->size);
  841. debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
  842. rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
  843. BUG_ON(rc == 0);
  844. consumed += rc;
  845. if (segment->total_copied >= segment->total_size) {
  846. debug_tcp("segment done\n");
  847. rc = segment->done(tcp_conn, segment);
  848. if (rc != 0) {
  849. skb_abort_seq_read(&seq);
  850. goto error;
  851. }
  852. /* The done() function sets up the
  853. * next segment. */
  854. }
  855. }
  856. skb_abort_seq_read(&seq);
  857. conn->rxdata_octets += consumed;
  858. return consumed;
  859. error:
  860. debug_tcp("Error receiving PDU, errno=%d\n", rc);
  861. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  862. return 0;
  863. }
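/*
 * Note: iscsi_tcp_recv() is invoked through tcp_read_sock() from the
 * data_ready callback below, so it runs in softirq context; any page it
 * mapped with kmap_atomic() is unmapped again before data_ready returns.
 */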
  864. static void
  865. iscsi_tcp_data_ready(struct sock *sk, int flag)
  866. {
  867. struct iscsi_conn *conn = sk->sk_user_data;
  868. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  869. read_descriptor_t rd_desc;
  870. read_lock(&sk->sk_callback_lock);
  871. /*
  872. * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
  873. * We set count to 1 because we want the network layer to
  874. * hand us all the skbs that are available. iscsi_tcp_recv
  875. * handles pdus that cross buffers or pdus that still need data.
  876. */
  877. rd_desc.arg.data = conn;
  878. rd_desc.count = 1;
  879. tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
  880. read_unlock(&sk->sk_callback_lock);
  881. /* If we had to (atomically) map a highmem page,
  882. * unmap it now. */
  883. iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
  884. }
  885. static void
  886. iscsi_tcp_state_change(struct sock *sk)
  887. {
  888. struct iscsi_tcp_conn *tcp_conn;
  889. struct iscsi_conn *conn;
  890. struct iscsi_session *session;
  891. void (*old_state_change)(struct sock *);
  892. read_lock(&sk->sk_callback_lock);
  893. conn = (struct iscsi_conn*)sk->sk_user_data;
  894. session = conn->session;
  895. if ((sk->sk_state == TCP_CLOSE_WAIT ||
  896. sk->sk_state == TCP_CLOSE) &&
  897. !atomic_read(&sk->sk_rmem_alloc)) {
  898. debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
  899. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  900. }
  901. tcp_conn = conn->dd_data;
  902. old_state_change = tcp_conn->old_state_change;
  903. read_unlock(&sk->sk_callback_lock);
  904. old_state_change(sk);
  905. }
  906. /**
  907. * iscsi_write_space - Called when more output buffer space is available
  908. * @sk: socket space is available for
  909. **/
  910. static void
  911. iscsi_write_space(struct sock *sk)
  912. {
  913. struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
  914. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  915. tcp_conn->old_write_space(sk);
  916. debug_tcp("iscsi_write_space: cid %d\n", conn->id);
  917. scsi_queue_work(conn->session->host, &conn->xmitwork);
  918. }
  919. static void
  920. iscsi_conn_set_callbacks(struct iscsi_conn *conn)
  921. {
  922. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  923. struct sock *sk = tcp_conn->sock->sk;
  924. /* assign new callbacks */
  925. write_lock_bh(&sk->sk_callback_lock);
  926. sk->sk_user_data = conn;
  927. tcp_conn->old_data_ready = sk->sk_data_ready;
  928. tcp_conn->old_state_change = sk->sk_state_change;
  929. tcp_conn->old_write_space = sk->sk_write_space;
  930. sk->sk_data_ready = iscsi_tcp_data_ready;
  931. sk->sk_state_change = iscsi_tcp_state_change;
  932. sk->sk_write_space = iscsi_write_space;
  933. write_unlock_bh(&sk->sk_callback_lock);
  934. }
  935. static void
  936. iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
  937. {
  938. struct sock *sk = tcp_conn->sock->sk;
  939. /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
  940. write_lock_bh(&sk->sk_callback_lock);
  941. sk->sk_user_data = NULL;
  942. sk->sk_data_ready = tcp_conn->old_data_ready;
  943. sk->sk_state_change = tcp_conn->old_state_change;
  944. sk->sk_write_space = tcp_conn->old_write_space;
  945. sk->sk_no_check = 0;
  946. write_unlock_bh(&sk->sk_callback_lock);
  947. }
  948. /**
  949. * iscsi_xmit - TCP transmit
  950. **/
  951. static int
  952. iscsi_xmit(struct iscsi_conn *conn)
  953. {
  954. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  955. struct iscsi_segment *segment = &tcp_conn->out.segment;
  956. unsigned int consumed = 0;
  957. int rc = 0;
  958. while (1) {
  959. rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
  960. if (rc < 0)
  961. goto error;
  962. if (rc == 0)
  963. break;
  964. consumed += rc;
  965. if (segment->total_copied >= segment->total_size) {
  966. if (segment->done != NULL) {
  967. rc = segment->done(tcp_conn, segment);
  968. if (rc < 0)
  969. goto error;
  970. }
  971. }
  972. }
  973. debug_tcp("xmit %d bytes\n", consumed);
  974. conn->txdata_octets += consumed;
  975. return consumed;
  976. error:
  977. /* Transmit error. We could initiate error recovery
  978. * here. */
  979. debug_tcp("Error sending PDU, errno=%d\n", rc);
  980. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  981. return rc;
  982. }
  983. /**
  984. * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
  985. */
  986. static inline int
  987. iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
  988. {
  989. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  990. struct iscsi_segment *segment = &tcp_conn->out.segment;
  991. return segment->total_copied - segment->total_size;
  992. }
  993. static inline int
  994. iscsi_tcp_flush(struct iscsi_conn *conn)
  995. {
  996. int rc;
  997. while (iscsi_tcp_xmit_qlen(conn)) {
  998. rc = iscsi_xmit(conn);
  999. if (rc == 0)
  1000. return -EAGAIN;
  1001. if (rc < 0)
  1002. return rc;
  1003. }
  1004. return 0;
  1005. }
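/*
 * Usage note (illustrative): the xmit helpers below call
 * iscsi_tcp_flush() before queueing more data.  A return of -EAGAIN
 * means the socket buffer is currently full and the send should be
 * retried later; other negative values indicate a transmit failure.
 */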
  1006. /*
  1007. * This is called when we're done sending the header.
  1008. * Simply copy the data_segment to the send segment, and return.
  1009. */
  1010. static int
  1011. iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
  1012. struct iscsi_segment *segment)
  1013. {
  1014. tcp_conn->out.segment = tcp_conn->out.data_segment;
  1015. debug_tcp("Header done. Next segment size %u total_size %u\n",
  1016. tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
  1017. return 0;
  1018. }
  1019. static void
  1020. iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
  1021. {
  1022. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1023. debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
  1024. conn->hdrdgst_en? ", digest enabled" : "");
  1025. /* Clear the data segment - needs to be filled in by the
  1026. * caller using iscsi_tcp_send_data_prep() */
  1027. memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
  1028. /* If header digest is enabled, compute the CRC and
  1029. * place the digest into the same buffer. We make
  1030. * sure that both iscsi_tcp_ctask and mtask have
  1031. * sufficient room.
  1032. */
  1033. if (conn->hdrdgst_en) {
  1034. iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
  1035. hdr + hdrlen);
  1036. hdrlen += ISCSI_DIGEST_SIZE;
  1037. }
  1038. /* Remember header pointer for later, when we need
  1039. * to decide whether there's a payload to go along
  1040. * with the header. */
  1041. tcp_conn->out.hdr = hdr;
  1042. iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
  1043. iscsi_tcp_send_hdr_done, NULL);
  1044. }
  1045. /*
  1046. * Prepare the send buffer for the payload data.
  1047. * Padding and checksumming will all be taken care
  1048. * of by the iscsi_segment routines.
  1049. */
  1050. static int
  1051. iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
  1052. unsigned int count, unsigned int offset,
  1053. unsigned int len)
  1054. {
  1055. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1056. struct hash_desc *tx_hash = NULL;
  1057. unsigned int hdr_spec_len;
  1058. debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
  1059. tcp_conn, offset, len,
  1060. conn->datadgst_en? ", digest enabled" : "");
  1061. /* Make sure the datalen matches what the caller
  1062. said he would send. */
  1063. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1064. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1065. if (conn->datadgst_en)
  1066. tx_hash = &tcp_conn->tx_hash;
  1067. return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
  1068. sg, count, offset, len,
  1069. NULL, tx_hash);
  1070. }
  1071. static void
  1072. iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  1073. size_t len)
  1074. {
  1075. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1076. struct hash_desc *tx_hash = NULL;
  1077. unsigned int hdr_spec_len;
  1078. debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
  1079. conn->datadgst_en? ", digest enabled" : "");
  1080. /* Make sure the datalen matches what the caller
  1081. said he would send. */
  1082. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1083. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1084. if (conn->datadgst_en)
  1085. tx_hash = &tcp_conn->tx_hash;
  1086. iscsi_segment_init_linear(&tcp_conn->out.data_segment,
  1087. data, len, NULL, tx_hash);
  1088. }
  1089. /**
  1090. * iscsi_solicit_data_cont - initialize next Data-Out
  1091. * @conn: iscsi connection
  1092. * @ctask: scsi command task
  1093. * @r2t: R2T info
  1094. * @left: bytes left to transfer
  1095. *
  1096. * Notes:
  1097. * Initialize next Data-Out within this R2T sequence and continue
  1098. * to process next Scatter-Gather element (if any) of this SCSI command.
  1099. *
  1100. * Called under connection lock.
  1101. **/
  1102. static int
  1103. iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
  1104. struct iscsi_r2t_info *r2t)
  1105. {
  1106. struct iscsi_data *hdr;
  1107. int new_offset, left;
  1108. BUG_ON(r2t->data_length - r2t->sent < 0);
  1109. left = r2t->data_length - r2t->sent;
  1110. if (left == 0)
  1111. return 0;
  1112. hdr = &r2t->dtask.hdr;
  1113. memset(hdr, 0, sizeof(struct iscsi_data));
  1114. hdr->ttt = r2t->ttt;
  1115. hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
  1116. r2t->solicit_datasn++;
  1117. hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
  1118. memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
  1119. hdr->itt = ctask->hdr->itt;
  1120. hdr->exp_statsn = r2t->exp_statsn;
  1121. new_offset = r2t->data_offset + r2t->sent;
  1122. hdr->offset = cpu_to_be32(new_offset);
  1123. if (left > conn->max_xmit_dlength) {
  1124. hton24(hdr->dlength, conn->max_xmit_dlength);
  1125. r2t->data_count = conn->max_xmit_dlength;
  1126. } else {
  1127. hton24(hdr->dlength, left);
  1128. r2t->data_count = left;
  1129. hdr->flags = ISCSI_FLAG_CMD_FINAL;
  1130. }
  1131. conn->dataout_pdus_cnt++;
  1132. return 1;
  1133. }
  1134. /**
  1135. * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  1136. * @conn: iscsi connection
  1137. * @ctask: scsi command task
  1138. * @sc: scsi command
  1139. **/
  1140. static int
  1141. iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
  1142. {
  1143. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  1144. struct iscsi_conn *conn = ctask->conn;
  1145. struct scsi_cmnd *sc = ctask->sc;
  1146. int err;
  1147. BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
  1148. tcp_ctask->sent = 0;
  1149. tcp_ctask->exp_datasn = 0;
  1150. /* Prepare PDU, optionally w/ immediate data */
  1151. debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
  1152. conn->id, ctask->itt, ctask->imm_count,
  1153. ctask->unsol_count);
  1154. iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
  1155. if (!ctask->imm_count)
  1156. return 0;
  1157. /* If we have immediate data, attach a payload */
  1158. err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
  1159. 0, ctask->imm_count);
  1160. if (err)
  1161. return err;
  1162. tcp_ctask->sent += ctask->imm_count;
  1163. ctask->imm_count = 0;
  1164. return 0;
  1165. }
  1166. /**
  1167. * iscsi_tcp_mtask_xmit - xmit management (immediate) task
  1168. * @conn: iscsi connection
  1169. * @mtask: task management task
  1170. *
  1171. * Notes:
  1172. * The function can return -EAGAIN in which case caller must
  1173. * call it again later, or recover. '0' return code means successful
  1174. * xmit.
  1175. **/
  1176. static int
  1177. iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
  1178. {
  1179. int rc;
  1180. /* Flush any pending data first. */
  1181. rc = iscsi_tcp_flush(conn);
  1182. if (rc < 0)
  1183. return rc;
  1184. if (mtask->hdr->itt == RESERVED_ITT) {
  1185. struct iscsi_session *session = conn->session;
  1186. spin_lock_bh(&session->lock);
  1187. iscsi_free_mgmt_task(conn, mtask);
  1188. spin_unlock_bh(&session->lock);
  1189. }
  1190. return 0;
  1191. }
  1192. /*
  1193. * iscsi_tcp_ctask_xmit - xmit normal PDU task
  1194. * @conn: iscsi connection
  1195. * @ctask: iscsi command task
  1196. *
  1197. * We're expected to return 0 when everything was transmitted successfully,
  1198. * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  1199. * of error.
  1200. */
  1201. static int
  1202. iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  1203. {
  1204. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  1205. struct scsi_cmnd *sc = ctask->sc;
  1206. int rc = 0;
  1207. flush:
  1208. /* Flush any pending data first. */
  1209. rc = iscsi_tcp_flush(conn);
  1210. if (rc < 0)
  1211. return rc;
  1212. /* Are we done already? */
  1213. if (sc->sc_data_direction != DMA_TO_DEVICE)
  1214. return 0;
  1215. if (ctask->unsol_count != 0) {
  1216. struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
  1217. /* Prepare a header for the unsolicited PDU.
  1218. * The amount of data we want to send will be
  1219. * in ctask->data_count.
  1220. * FIXME: return the data count instead.
  1221. */
  1222. iscsi_prep_unsolicit_data_pdu(ctask, hdr);
  1223. debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
  1224. ctask->itt, tcp_ctask->sent, ctask->data_count);
  1225. iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
  1226. rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
  1227. scsi_sg_count(sc),
  1228. tcp_ctask->sent,
  1229. ctask->data_count);
  1230. if (rc)
  1231. goto fail;
  1232. tcp_ctask->sent += ctask->data_count;
  1233. ctask->unsol_count -= ctask->data_count;
  1234. goto flush;
  1235. } else {
  1236. struct iscsi_session *session = conn->session;
  1237. struct iscsi_r2t_info *r2t;
  1238. /* All unsolicited PDUs sent. Check for solicited PDUs.
  1239. */
  1240. spin_lock_bh(&session->lock);
  1241. r2t = tcp_ctask->r2t;
  1242. if (r2t != NULL) {
  1243. /* Continue with this R2T? */
  1244. if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
  1245. debug_scsi(" done with r2t %p\n", r2t);
  1246. __kfifo_put(tcp_ctask->r2tpool.queue,
  1247. (void*)&r2t, sizeof(void*));
  1248. tcp_ctask->r2t = r2t = NULL;
  1249. }
  1250. }
  1251. if (r2t == NULL) {
  1252. __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
  1253. sizeof(void*));
  1254. r2t = tcp_ctask->r2t;
  1255. }
  1256. spin_unlock_bh(&session->lock);
  1257. /* Waiting for more R2Ts to arrive. */
  1258. if (r2t == NULL) {
  1259. debug_tcp("no R2Ts yet\n");
  1260. return 0;
  1261. }
  1262. debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
  1263. r2t, r2t->solicit_datasn - 1, ctask->itt,
  1264. r2t->data_offset + r2t->sent, r2t->data_count);
  1265. iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
  1266. sizeof(struct iscsi_hdr));
  1267. rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
  1268. scsi_sg_count(sc),
  1269. r2t->data_offset + r2t->sent,
  1270. r2t->data_count);
  1271. if (rc)
  1272. goto fail;
  1273. tcp_ctask->sent += r2t->data_count;
  1274. r2t->sent += r2t->data_count;
  1275. goto flush;
  1276. }
  1277. return 0;
  1278. fail:
  1279. iscsi_conn_failure(conn, rc);
  1280. return -EIO;
  1281. }
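/*
 * Note: the write path above alternates between preparing a segment
 * (header plus payload) and flushing it to the socket, first for any
 * unsolicited data and then for each queued R2T, returning 0 once there
 * is nothing left to send for this task.
 */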
  1282. static struct iscsi_cls_conn *
  1283. iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
  1284. {
  1285. struct iscsi_conn *conn;
  1286. struct iscsi_cls_conn *cls_conn;
  1287. struct iscsi_tcp_conn *tcp_conn;
  1288. cls_conn = iscsi_conn_setup(cls_session, conn_idx);
  1289. if (!cls_conn)
  1290. return NULL;
  1291. conn = cls_conn->dd_data;
  1292. /*
  1293. * due to strange issues with iser these are not set
  1294. * in iscsi_conn_setup
  1295. */
  1296. conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
  1297. tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
  1298. if (!tcp_conn)
  1299. goto tcp_conn_alloc_fail;
  1300. conn->dd_data = tcp_conn;
  1301. tcp_conn->iscsi_conn = conn;
  1302. tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1303. CRYPTO_ALG_ASYNC);
  1304. tcp_conn->tx_hash.flags = 0;
  1305. if (IS_ERR(tcp_conn->tx_hash.tfm)) {
  1306. printk(KERN_ERR "Could not create connection due to crc32c "
  1307. "loading error %ld. Make sure the crc32c module is "
  1308. "built as a module or into the kernel\n",
  1309. PTR_ERR(tcp_conn->tx_hash.tfm));
  1310. goto free_tcp_conn;
  1311. }
  1312. tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1313. CRYPTO_ALG_ASYNC);
  1314. tcp_conn->rx_hash.flags = 0;
  1315. if (IS_ERR(tcp_conn->rx_hash.tfm)) {
  1316. printk(KERN_ERR "Could not create connection due to crc32c "
  1317. "loading error %ld. Make sure the crc32c module is "
  1318. "built as a module or into the kernel\n",
  1319. PTR_ERR(tcp_conn->rx_hash.tfm));
  1320. goto free_tx_tfm;
  1321. }
  1322. return cls_conn;
  1323. free_tx_tfm:
  1324. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1325. free_tcp_conn:
  1326. kfree(tcp_conn);
  1327. tcp_conn_alloc_fail:
  1328. iscsi_conn_teardown(cls_conn);
  1329. return NULL;
  1330. }
  1331. static void
  1332. iscsi_tcp_release_conn(struct iscsi_conn *conn)
  1333. {
  1334. struct iscsi_session *session = conn->session;
  1335. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1336. struct socket *sock = tcp_conn->sock;
  1337. if (!sock)
  1338. return;
  1339. sock_hold(sock->sk);
  1340. iscsi_conn_restore_callbacks(tcp_conn);
  1341. sock_put(sock->sk);
  1342. spin_lock_bh(&session->lock);
  1343. tcp_conn->sock = NULL;
  1344. conn->recv_lock = NULL;
  1345. spin_unlock_bh(&session->lock);
  1346. sockfd_put(sock);
  1347. }
  1348. static void
  1349. iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
  1350. {
  1351. struct iscsi_conn *conn = cls_conn->dd_data;
  1352. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1353. iscsi_tcp_release_conn(conn);
  1354. iscsi_conn_teardown(cls_conn);
  1355. if (tcp_conn->tx_hash.tfm)
  1356. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1357. if (tcp_conn->rx_hash.tfm)
  1358. crypto_free_hash(tcp_conn->rx_hash.tfm);
  1359. kfree(tcp_conn);
  1360. }
  1361. static void
  1362. iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
  1363. {
  1364. struct iscsi_conn *conn = cls_conn->dd_data;
  1365. iscsi_conn_stop(cls_conn, flag);
  1366. iscsi_tcp_release_conn(conn);
  1367. }

static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
                              char *buf, int *port,
                              int (*getname)(struct socket *, struct sockaddr *,
                                             int *addrlen))
{
        struct sockaddr_storage *addr;
        struct sockaddr_in6 *sin6;
        struct sockaddr_in *sin;
        int rc = 0, len;

        addr = kmalloc(sizeof(*addr), GFP_KERNEL);
        if (!addr)
                return -ENOMEM;

        if (getname(sock, (struct sockaddr *) addr, &len)) {
                rc = -ENODEV;
                goto free_addr;
        }

        switch (addr->ss_family) {
        case AF_INET:
                sin = (struct sockaddr_in *)addr;
                spin_lock_bh(&conn->session->lock);
                sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
                *port = be16_to_cpu(sin->sin_port);
                spin_unlock_bh(&conn->session->lock);
                break;
        case AF_INET6:
                sin6 = (struct sockaddr_in6 *)addr;
                spin_lock_bh(&conn->session->lock);
                sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
                *port = be16_to_cpu(sin6->sin6_port);
                spin_unlock_bh(&conn->session->lock);
                break;
        }
free_addr:
        kfree(addr);
        return rc;
}
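
/*
 * iscsi_tcp_conn_bind - bind a userspace-created socket to a connection
 *
 * transport_eph is the socket file descriptor handed down from userspace.
 * The local and remote addresses are saved for later queries, the socket
 * is attached to the connection, and the receive callbacks and the
 * header-receive state machine are set up.
 */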

static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
                    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
                    int is_leading)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct sock *sk;
        struct socket *sock;
        int err;

        /* lookup for existing socket */
        sock = sockfd_lookup((int)transport_eph, &err);
        if (!sock) {
                printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
                return -EEXIST;
        }
        /*
         * Copy these values now: if we drop the session, userspace may
         * still want to query them, and we will need them for the
         * reconnect.
         */
        err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
                                 &conn->portal_port, kernel_getpeername);
        if (err)
                goto free_socket;

        err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
                                 &conn->local_port, kernel_getsockname);
        if (err)
                goto free_socket;

        err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
        if (err)
                goto free_socket;

        /* bind iSCSI connection and socket */
        tcp_conn->sock = sock;

        /* setup Socket parameters */
        sk = sock->sk;
        sk->sk_reuse = 1;
        sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
        sk->sk_allocation = GFP_ATOMIC;

        /* FIXME: disable Nagle's algorithm */

        /*
         * Intercept TCP callbacks for sendfile like receive
         * processing.
         */
        conn->recv_lock = &sk->sk_callback_lock;
        iscsi_conn_set_callbacks(conn);
        tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
        /*
         * set receive state machine into initial state
         */
        iscsi_tcp_hdr_recv_prep(tcp_conn);
        return 0;

free_socket:
        sockfd_put(sock);
        return err;
}

/* called with host lock */
static void
iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
        debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);

        /* Prepare PDU, optionally w/ immediate data */
        iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));

        /* If we have immediate data, attach a payload */
        if (mtask->data_count)
                iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
                                                   mtask->data_count);
}
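
/*
 * iscsi_r2tpool_alloc - allocate per-task R2T resources
 *
 * For every command task in the session this allocates a pool of R2T
 * descriptors and the kfifo used as the R2T transmit queue; on failure
 * everything allocated so far is torn down again.
 */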

static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
        int i;
        int cmd_i;

        /*
         * initialize per-task: R2T pool and xmit queue
         */
        for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
                struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
                struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

                /*
                 * Pre-allocate 4x as many R2Ts to handle the race where
                 * the target acks DataOut faster than data_xmit() can
                 * replenish the r2tqueue.
                 */

                /* R2T pool */
                if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
                                    NULL, sizeof(struct iscsi_r2t_info))) {
                        goto r2t_alloc_fail;
                }

                /* R2T xmit queue */
                tcp_ctask->r2tqueue = kfifo_alloc(
                      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
                if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
                        iscsi_pool_free(&tcp_ctask->r2tpool);
                        goto r2t_alloc_fail;
                }
        }

        return 0;

r2t_alloc_fail:
        for (i = 0; i < cmd_i; i++) {
                struct iscsi_cmd_task *ctask = session->cmds[i];
                struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

                kfifo_free(tcp_ctask->r2tqueue);
                iscsi_pool_free(&tcp_ctask->r2tpool);
        }
        return -ENOMEM;
}

static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
        int i;

        for (i = 0; i < session->cmds_max; i++) {
                struct iscsi_cmd_task *ctask = session->cmds[i];
                struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

                kfifo_free(tcp_ctask->r2tqueue);
                iscsi_pool_free(&tcp_ctask->r2tpool);
        }
}
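
/*
 * iscsi_conn_set_param - TCP-specific connection parameter handling
 *
 * Most parameters are passed through to iscsi_set_param(); enabling the
 * data digest switches the transmit path to sock_no_sendpage, and changing
 * MaxOutstandingR2T forces the R2T pools to be reallocated.
 */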

static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
                     char *buf, int buflen)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        int value;

        switch(param) {
        case ISCSI_PARAM_HDRDGST_EN:
                iscsi_set_param(cls_conn, param, buf, buflen);
                break;
        case ISCSI_PARAM_DATADGST_EN:
                iscsi_set_param(cls_conn, param, buf, buflen);
                tcp_conn->sendpage = conn->datadgst_en ?
                        sock_no_sendpage : tcp_conn->sock->ops->sendpage;
                break;
        case ISCSI_PARAM_MAX_R2T:
                sscanf(buf, "%d", &value);
                if (value <= 0 || !is_power_of_2(value))
                        return -EINVAL;
                if (session->max_r2t == value)
                        break;
                iscsi_r2tpool_free(session);
                iscsi_set_param(cls_conn, param, buf, buflen);
                if (iscsi_r2tpool_alloc(session))
                        return -ENOMEM;
                break;
        default:
                return iscsi_set_param(cls_conn, param, buf, buflen);
        }

        return 0;
}

static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
                         enum iscsi_param param, char *buf)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        int len;

        switch(param) {
        case ISCSI_PARAM_CONN_PORT:
                spin_lock_bh(&conn->session->lock);
                len = sprintf(buf, "%hu\n", conn->portal_port);
                spin_unlock_bh(&conn->session->lock);
                break;
        case ISCSI_PARAM_CONN_ADDRESS:
                spin_lock_bh(&conn->session->lock);
                len = sprintf(buf, "%s\n", conn->portal_address);
                spin_unlock_bh(&conn->session->lock);
                break;
        default:
                return iscsi_conn_get_param(cls_conn, param, buf);
        }

        return len;
}

static int
iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
                         char *buf)
{
        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
        int len;

        switch (param) {
        case ISCSI_HOST_PARAM_IPADDRESS:
                spin_lock_bh(&session->lock);
                if (!session->leadconn)
                        len = -ENODEV;
                else
                        len = sprintf(buf, "%s\n",
                                      session->leadconn->local_address);
                spin_unlock_bh(&session->lock);
                break;
        default:
                return iscsi_host_get_param(shost, param, buf);
        }

        return len;
}
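
/*
 * Fill in the standard PDU and byte counters plus three driver-specific
 * custom statistics: sendpage failures, discontiguous headers received,
 * and the abort count.
 */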

static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

        stats->txdata_octets = conn->txdata_octets;
        stats->rxdata_octets = conn->rxdata_octets;
        stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
        stats->dataout_pdus = conn->dataout_pdus_cnt;
        stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
        stats->datain_pdus = conn->datain_pdus_cnt;
        stats->r2t_pdus = conn->r2t_pdus_cnt;
        stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
        stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
        stats->custom_length = 3;
        strcpy(stats->custom[0].desc, "tx_sendpage_failures");
        stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
        strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
        stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
        strcpy(stats->custom[2].desc, "eh_abort_cnt");
        stats->custom[2].value = conn->eh_abort_cnt;
}
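
/*
 * iscsi_tcp_session_create - allocate a session and its per-task state
 *
 * Sets up the generic session, points each command and management task at
 * the PDU header embedded in its TCP task data, and allocates the R2T
 * pools.
 */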

static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
                         struct scsi_transport_template *scsit,
                         uint16_t cmds_max, uint16_t qdepth,
                         uint32_t initial_cmdsn, uint32_t *hostno)
{
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *session;
        uint32_t hn;
        int cmd_i;

        cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
                                          sizeof(struct iscsi_tcp_cmd_task),
                                          sizeof(struct iscsi_tcp_mgmt_task),
                                          initial_cmdsn, &hn);
        if (!cls_session)
                return NULL;
        *hostno = hn;

        session = class_to_transport_session(cls_session);
        for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
                struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
                struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

                ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
                ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
        }

        for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
                struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
                struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

                mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
        }

        if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
                goto r2tpool_alloc_fail;

        return cls_session;

r2tpool_alloc_fail:
        iscsi_session_teardown(cls_session);
        return NULL;
}

static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
        iscsi_r2tpool_free(class_to_transport_session(cls_session));
        iscsi_session_teardown(cls_session);
}
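
/*
 * Data is touched by the CPU through the socket layer rather than DMAed
 * by an adapter, so any page (BLK_BOUNCE_ANY) and any buffer alignment
 * is acceptable.
 */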

static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
{
        blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
        blk_queue_dma_alignment(sdev->request_queue, 0);
        return 0;
}

static struct scsi_host_template iscsi_sht = {
        .module                 = THIS_MODULE,
        .name                   = "iSCSI Initiator over TCP/IP",
        .queuecommand           = iscsi_queuecommand,
        .change_queue_depth     = iscsi_change_queue_depth,
        .can_queue              = ISCSI_DEF_XMIT_CMDS_MAX - 1,
        .sg_tablesize           = 4096,
        .max_sectors            = 0xFFFF,
        .cmd_per_lun            = ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_host_reset_handler  = iscsi_eh_host_reset,
        .use_clustering         = DISABLE_CLUSTERING,
        .use_sg_chaining        = ENABLE_SG_CHAINING,
        .slave_configure        = iscsi_tcp_slave_configure,
        .proc_name              = "iscsi_tcp",
        .this_id                = -1,
};
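
/*
 * Transport template registered with the iSCSI transport class: it
 * advertises the supported capabilities and parameters and wires the
 * session, connection and I/O callbacks implemented above into libiscsi.
 */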

static struct iscsi_transport iscsi_tcp_transport = {
        .owner                  = THIS_MODULE,
        .name                   = "tcp",
        .caps                   = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
                                  | CAP_DATADGST,
        .param_mask             = ISCSI_MAX_RECV_DLENGTH |
                                  ISCSI_MAX_XMIT_DLENGTH |
                                  ISCSI_HDRDGST_EN |
                                  ISCSI_DATADGST_EN |
                                  ISCSI_INITIAL_R2T_EN |
                                  ISCSI_MAX_R2T |
                                  ISCSI_IMM_DATA_EN |
                                  ISCSI_FIRST_BURST |
                                  ISCSI_MAX_BURST |
                                  ISCSI_PDU_INORDER_EN |
                                  ISCSI_DATASEQ_INORDER_EN |
                                  ISCSI_ERL |
                                  ISCSI_CONN_PORT |
                                  ISCSI_CONN_ADDRESS |
                                  ISCSI_EXP_STATSN |
                                  ISCSI_PERSISTENT_PORT |
                                  ISCSI_PERSISTENT_ADDRESS |
                                  ISCSI_TARGET_NAME | ISCSI_TPGT |
                                  ISCSI_USERNAME | ISCSI_PASSWORD |
                                  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
                                  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
                                  ISCSI_LU_RESET_TMO |
                                  ISCSI_PING_TMO | ISCSI_RECV_TMO,
        .host_param_mask        = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
                                  ISCSI_HOST_INITIATOR_NAME |
                                  ISCSI_HOST_NETDEV_NAME,
        .host_template          = &iscsi_sht,
        .conndata_size          = sizeof(struct iscsi_conn),
        .max_conn               = 1,
        .max_cmd_len            = 16,
        /* session management */
        .create_session         = iscsi_tcp_session_create,
        .destroy_session        = iscsi_tcp_session_destroy,
        /* connection management */
        .create_conn            = iscsi_tcp_conn_create,
        .bind_conn              = iscsi_tcp_conn_bind,
        .destroy_conn           = iscsi_tcp_conn_destroy,
        .set_param              = iscsi_conn_set_param,
        .get_conn_param         = iscsi_tcp_conn_get_param,
        .get_session_param      = iscsi_session_get_param,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_tcp_conn_stop,
        /* iscsi host params */
        .get_host_param         = iscsi_tcp_host_get_param,
        .set_host_param         = iscsi_host_set_param,
        /* IO */
        .send_pdu               = iscsi_conn_send_pdu,
        .get_stats              = iscsi_conn_get_stats,
        .init_cmd_task          = iscsi_tcp_ctask_init,
        .init_mgmt_task         = iscsi_tcp_mtask_init,
        .xmit_cmd_task          = iscsi_tcp_ctask_xmit,
        .xmit_mgmt_task         = iscsi_tcp_mtask_xmit,
        .cleanup_cmd_task       = iscsi_tcp_cleanup_ctask,
        /* recovery */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static int __init
iscsi_tcp_init(void)
{
        if (iscsi_max_lun < 1) {
                printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
                       iscsi_max_lun);
                return -EINVAL;
        }
        iscsi_tcp_transport.max_lun = iscsi_max_lun;

        if (!iscsi_register_transport(&iscsi_tcp_transport))
                return -ENODEV;

        return 0;
}

static void __exit
iscsi_tcp_exit(void)
{
        iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);