iscsi_tcp.c

  1. /*
  2. * iSCSI Initiator over TCP/IP Data-Path
  3. *
  4. * Copyright (C) 2004 Dmitry Yusupov
  5. * Copyright (C) 2004 Alex Aizman
  6. * Copyright (C) 2005 - 2006 Mike Christie
  7. * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
  8. * maintained by open-iscsi@googlegroups.com
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published
  12. * by the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * See the file COPYING included with this distribution for more details.
  21. *
  22. * Credits:
  23. * Christoph Hellwig
  24. * FUJITA Tomonori
  25. * Arne Redlich
  26. * Zhenyu Wang
  27. */
  28. #include <linux/types.h>
  29. #include <linux/list.h>
  30. #include <linux/inet.h>
  31. #include <linux/file.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/crypto.h>
  34. #include <linux/delay.h>
  35. #include <linux/kfifo.h>
  36. #include <linux/scatterlist.h>
  37. #include <net/tcp.h>
  38. #include <scsi/scsi_cmnd.h>
  39. #include <scsi/scsi_device.h>
  40. #include <scsi/scsi_host.h>
  41. #include <scsi/scsi.h>
  42. #include <scsi/scsi_transport_iscsi.h>
  43. #include "iscsi_tcp.h"
  44. MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
  45. "Alex Aizman <itn780@yahoo.com>");
  46. MODULE_DESCRIPTION("iSCSI/TCP data-path");
  47. MODULE_LICENSE("GPL");
  48. #undef DEBUG_TCP
  49. #define DEBUG_ASSERT
  50. #ifdef DEBUG_TCP
  51. #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
  52. #else
  53. #define debug_tcp(fmt...)
  54. #endif
  55. static struct scsi_transport_template *iscsi_tcp_scsi_transport;
  56. static struct scsi_host_template iscsi_sht;
  57. static struct iscsi_transport iscsi_tcp_transport;
  58. static unsigned int iscsi_max_lun = 512;
  59. module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
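/*
 * iscsi_max_lun becomes the scsi host's max_lun when a session is
 * created. S_IRUGO makes the parameter read-only at runtime, but it can
 * be set at load time, e.g. "modprobe iscsi_tcp max_lun=256" (the value
 * here is only an example).
 */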
  60. static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  61. struct iscsi_segment *segment);
  62. /*
  63. * Scatterlist handling: inside the iscsi_segment, we
  64. * remember an index into the scatterlist, and set data/size
  65. * to the current scatterlist entry. For highmem pages, we
  66. * kmap as needed.
  67. *
  68. * Note that the page is unmapped when we return from
  69. * TCP's data_ready handler, so we may end up mapping and
  70. * unmapping the same page repeatedly. The whole reason
  71. * for this is that we shouldn't keep the page mapped
  72. * outside the softirq.
  73. */
  74. /**
  75. * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
  76. * @segment: the buffer object
  77. * @sg: scatterlist
  78. * @offset: byte offset into that sg entry
  79. *
  80. * This function sets up the segment so that subsequent
  81. * data is copied to the indicated sg entry, at the given
  82. * offset.
  83. */
  84. static inline void
  85. iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
  86. struct scatterlist *sg, unsigned int offset)
  87. {
  88. segment->sg = sg;
  89. segment->sg_offset = offset;
  90. segment->size = min(sg->length - offset,
  91. segment->total_size - segment->total_copied);
  92. segment->data = NULL;
  93. }
  94. /**
  95. * iscsi_tcp_segment_map - map the current S/G page
  96. * @segment: iscsi_segment
  97. * @recv: 1 if called from recv path
  98. *
  99. * We only need to possibly kmap data if scatter lists are being used,
  100. * because the iscsi passthrough and internal IO paths will never use high
  101. * mem pages.
  102. */
  103. static inline void
  104. iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
  105. {
  106. struct scatterlist *sg;
  107. if (segment->data != NULL || !segment->sg)
  108. return;
  109. sg = segment->sg;
  110. BUG_ON(segment->sg_mapped);
  111. BUG_ON(sg->length == 0);
  112. /*
  113. * If the page count is greater than one it is ok to send
  114. * to the network layer's zero copy send path. If not we
  115. * have to go the slow sendmsg path. We always map for the
  116. * recv path.
  117. */
  118. if (page_count(sg_page(sg)) >= 1 && !recv)
  119. return;
  120. debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
  121. segment);
  122. segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
  123. segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
  124. }
  125. static inline void
  126. iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
  127. {
  128. debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
  129. if (segment->sg_mapped) {
  130. debug_tcp("iscsi_tcp_segment_unmap valid\n");
  131. kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
  132. segment->sg_mapped = NULL;
  133. segment->data = NULL;
  134. }
  135. }
  136. /*
  137. * Splice the digest buffer into the buffer
  138. */
  139. static inline void
  140. iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
  141. {
  142. segment->data = digest;
  143. segment->digest_len = ISCSI_DIGEST_SIZE;
  144. segment->total_size += ISCSI_DIGEST_SIZE;
  145. segment->size = ISCSI_DIGEST_SIZE;
  146. segment->copied = 0;
  147. segment->sg = NULL;
  148. segment->hash = NULL;
  149. }
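/*
 * The spliced digest is the 4-byte CRC32C that RFC 3720 defines for
 * header and data digests; ISCSI_DIGEST_SIZE reflects that. Appending it
 * to the segment lets the normal copy loop send or receive the digest
 * bytes with no special casing, and iscsi_tcp_dgst_verify() later
 * compares recv_digest against the locally computed digest.
 */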
  150. /**
  151. * iscsi_tcp_segment_done - check whether the segment is complete
  152. * @segment: iscsi segment to check
153. * @recv: set to one if this is called from the recv path
  154. * @copied: number of bytes copied
  155. *
  156. * Check if we're done receiving this segment. If the receive
  157. * buffer is full but we expect more data, move on to the
  158. * next entry in the scatterlist.
  159. *
  160. * If the amount of data we received isn't a multiple of 4,
  161. * we will transparently receive the pad bytes, too.
  162. *
  163. * This function must be re-entrant.
  164. */
  165. static inline int
  166. iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
  167. {
  168. static unsigned char padbuf[ISCSI_PAD_LEN];
  169. struct scatterlist sg;
  170. unsigned int pad;
  171. debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
  172. segment->size, recv ? "recv" : "xmit");
  173. if (segment->hash && copied) {
  174. /*
175. * If a segment is kmapped we must unmap it before sending
  176. * to the crypto layer since that will try to kmap it again.
  177. */
  178. iscsi_tcp_segment_unmap(segment);
  179. if (!segment->data) {
  180. sg_init_table(&sg, 1);
  181. sg_set_page(&sg, sg_page(segment->sg), copied,
  182. segment->copied + segment->sg_offset +
  183. segment->sg->offset);
  184. } else
  185. sg_init_one(&sg, segment->data + segment->copied,
  186. copied);
  187. crypto_hash_update(segment->hash, &sg, copied);
  188. }
  189. segment->copied += copied;
  190. if (segment->copied < segment->size) {
  191. iscsi_tcp_segment_map(segment, recv);
  192. return 0;
  193. }
  194. segment->total_copied += segment->copied;
  195. segment->copied = 0;
  196. segment->size = 0;
  197. /* Unmap the current scatterlist page, if there is one. */
  198. iscsi_tcp_segment_unmap(segment);
  199. /* Do we have more scatterlist entries? */
  200. debug_tcp("total copied %u total size %u\n", segment->total_copied,
  201. segment->total_size);
  202. if (segment->total_copied < segment->total_size) {
  203. /* Proceed to the next entry in the scatterlist. */
  204. iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
  205. 0);
  206. iscsi_tcp_segment_map(segment, recv);
  207. BUG_ON(segment->size == 0);
  208. return 0;
  209. }
  210. /* Do we need to handle padding? */
  211. pad = iscsi_padding(segment->total_copied);
  212. if (pad != 0) {
  213. debug_tcp("consume %d pad bytes\n", pad);
  214. segment->total_size += pad;
  215. segment->size = pad;
  216. segment->data = padbuf;
  217. return 0;
  218. }
  219. /*
  220. * Set us up for transferring the data digest. hdr digest
  221. * is completely handled in hdr done function.
  222. */
  223. if (segment->hash) {
  224. crypto_hash_final(segment->hash, segment->digest);
  225. iscsi_tcp_segment_splice_digest(segment,
  226. recv ? segment->recv_digest : segment->digest);
  227. return 0;
  228. }
  229. return 1;
  230. }
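/*
 * Data segments are padded to a 4-byte boundary (ISCSI_PAD_LEN), so
 * iscsi_padding() returns the number of filler bytes that follow the
 * payload. For example, a 13-byte transfer is followed by 3 pad bytes,
 * which are consumed into the shared padbuf above and never reach the
 * caller's buffers.
 */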
  231. /**
  232. * iscsi_tcp_xmit_segment - transmit segment
  233. * @tcp_conn: the iSCSI TCP connection
234. * @segment: the buffer to transmit
  235. *
  236. * This function transmits as much of the buffer as
  237. * the network layer will accept, and returns the number of
  238. * bytes transmitted.
  239. *
  240. * If CRC hashing is enabled, the function will compute the
  241. * hash as it goes. When the entire segment has been transmitted,
  242. * it will retrieve the hash value and send it as well.
  243. */
  244. static int
  245. iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
  246. struct iscsi_segment *segment)
  247. {
  248. struct socket *sk = tcp_conn->sock;
  249. unsigned int copied = 0;
  250. int r = 0;
  251. while (!iscsi_tcp_segment_done(segment, 0, r)) {
  252. struct scatterlist *sg;
  253. unsigned int offset, copy;
  254. int flags = 0;
  255. r = 0;
  256. offset = segment->copied;
  257. copy = segment->size - offset;
  258. if (segment->total_copied + segment->size < segment->total_size)
  259. flags |= MSG_MORE;
  260. /* Use sendpage if we can; else fall back to sendmsg */
  261. if (!segment->data) {
  262. sg = segment->sg;
  263. offset += segment->sg_offset + sg->offset;
  264. r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
  265. flags);
  266. } else {
  267. struct msghdr msg = { .msg_flags = flags };
  268. struct kvec iov = {
  269. .iov_base = segment->data + offset,
  270. .iov_len = copy
  271. };
  272. r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
  273. }
  274. if (r < 0) {
  275. iscsi_tcp_segment_unmap(segment);
  276. if (copied || r == -EAGAIN)
  277. break;
  278. return r;
  279. }
  280. copied += r;
  281. }
  282. return copied;
  283. }
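/*
 * Two transmit paths are used above: scatterlist payload goes out through
 * the socket's sendpage() so page data need not be copied again, while
 * linear buffers (headers, spliced digests and pad bytes, which all set
 * segment->data) fall back to kernel_sendmsg(). MSG_MORE hints to TCP
 * that more of this segment follows, letting small pieces be coalesced
 * into fewer packets.
 */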
  284. /**
  285. * iscsi_tcp_segment_recv - copy data to segment
  286. * @tcp_conn: the iSCSI TCP connection
  287. * @segment: the buffer to copy to
  288. * @ptr: data pointer
  289. * @len: amount of data available
  290. *
  291. * This function copies up to @len bytes to the
  292. * given buffer, and returns the number of bytes
  293. * consumed, which can actually be less than @len.
  294. *
  295. * If hash digest is enabled, the function will update the
  296. * hash while copying.
  297. * Combining these two operations doesn't buy us a lot (yet),
  298. * but in the future we could implement combined copy+crc,
299. * just the way we do for network layer checksums.
  300. */
  301. static int
  302. iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
  303. struct iscsi_segment *segment, const void *ptr,
  304. unsigned int len)
  305. {
  306. unsigned int copy = 0, copied = 0;
  307. while (!iscsi_tcp_segment_done(segment, 1, copy)) {
  308. if (copied == len) {
  309. debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
  310. len);
  311. break;
  312. }
  313. copy = min(len - copied, segment->size - segment->copied);
  314. debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
  315. memcpy(segment->data + segment->copied, ptr + copied, copy);
  316. copied += copy;
  317. }
  318. return copied;
  319. }
  320. static inline void
  321. iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
  322. unsigned char digest[ISCSI_DIGEST_SIZE])
  323. {
  324. struct scatterlist sg;
  325. sg_init_one(&sg, hdr, hdrlen);
  326. crypto_hash_digest(hash, &sg, hdrlen, digest);
  327. }
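/*
 * Header digests can be computed in one shot here because, unlike the
 * data digest, the complete header (basic header segment plus any AHS)
 * already sits in a linear buffer by the time the CRC is needed. The
 * hash itself is the "crc32c" transform allocated in
 * iscsi_tcp_conn_create().
 */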
  328. static inline int
  329. iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  330. struct iscsi_segment *segment)
  331. {
  332. if (!segment->digest_len)
  333. return 1;
  334. if (memcmp(segment->recv_digest, segment->digest,
  335. segment->digest_len)) {
  336. debug_scsi("digest mismatch\n");
  337. return 0;
  338. }
  339. return 1;
  340. }
  341. /*
  342. * Helper function to set up segment buffer
  343. */
  344. static inline void
  345. __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
  346. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  347. {
  348. memset(segment, 0, sizeof(*segment));
  349. segment->total_size = size;
  350. segment->done = done;
  351. if (hash) {
  352. segment->hash = hash;
  353. crypto_hash_init(hash);
  354. }
  355. }
  356. static inline void
  357. iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
  358. size_t size, iscsi_segment_done_fn_t *done,
  359. struct hash_desc *hash)
  360. {
  361. __iscsi_segment_init(segment, size, done, hash);
  362. segment->data = data;
  363. segment->size = size;
  364. }
  365. static inline int
  366. iscsi_segment_seek_sg(struct iscsi_segment *segment,
  367. struct scatterlist *sg_list, unsigned int sg_count,
  368. unsigned int offset, size_t size,
  369. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  370. {
  371. struct scatterlist *sg;
  372. unsigned int i;
  373. debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
  374. offset, size);
  375. __iscsi_segment_init(segment, size, done, hash);
  376. for_each_sg(sg_list, sg, sg_count, i) {
  377. debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
  378. sg->offset);
  379. if (offset < sg->length) {
  380. iscsi_tcp_segment_init_sg(segment, sg, offset);
  381. return 0;
  382. }
  383. offset -= sg->length;
  384. }
  385. return ISCSI_ERR_DATA_OFFSET;
  386. }
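/*
 * Example with illustrative sizes: given a scatterlist of 4096-byte
 * entries and offset 6144, the loop skips the first entry, reduces
 * offset to 2048, and starts the segment at byte 2048 of the second
 * entry. An offset past the end of the list means the target supplied a
 * bogus buffer offset, hence ISCSI_ERR_DATA_OFFSET.
 */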
  387. /**
  388. * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
  389. * @tcp_conn: iscsi connection to prep for
  390. *
  391. * This function always passes NULL for the hash argument, because when this
  392. * function is called we do not yet know the final size of the header and want
  393. * to delay the digest processing until we know that.
  394. */
  395. static void
  396. iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  397. {
  398. debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
  399. tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
  400. iscsi_segment_init_linear(&tcp_conn->in.segment,
  401. tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
  402. iscsi_tcp_hdr_recv_done, NULL);
  403. }
  404. /*
  405. * Handle incoming reply to any other type of command
  406. */
  407. static int
  408. iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
  409. struct iscsi_segment *segment)
  410. {
  411. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  412. int rc = 0;
  413. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  414. return ISCSI_ERR_DATA_DGST;
  415. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
  416. conn->data, tcp_conn->in.datalen);
  417. if (rc)
  418. return rc;
  419. iscsi_tcp_hdr_recv_prep(tcp_conn);
  420. return 0;
  421. }
  422. static void
  423. iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  424. {
  425. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  426. struct hash_desc *rx_hash = NULL;
427. if (conn->datadgst_en &&
  428. !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
  429. rx_hash = &tcp_conn->rx_hash;
  430. iscsi_segment_init_linear(&tcp_conn->in.segment,
  431. conn->data, tcp_conn->in.datalen,
  432. iscsi_tcp_data_recv_done, rx_hash);
  433. }
  434. /*
  435. * must be called with session lock
  436. */
  437. static void iscsi_tcp_cleanup_task(struct iscsi_task *task)
  438. {
  439. struct iscsi_tcp_task *tcp_task = task->dd_data;
  440. struct iscsi_r2t_info *r2t;
  441. /* nothing to do for mgmt or pending tasks */
  442. if (!task->sc || task->state == ISCSI_TASK_PENDING)
  443. return;
  444. /* flush task's r2t queues */
  445. while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
  446. __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
  447. sizeof(void*));
  448. debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
  449. }
  450. r2t = tcp_task->r2t;
  451. if (r2t != NULL) {
  452. __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
  453. sizeof(void*));
  454. tcp_task->r2t = NULL;
  455. }
  456. }
  457. /**
  458. * iscsi_tcp_data_in - SCSI Data-In Response processing
  459. * @conn: iscsi connection
  460. * @task: scsi command task
  461. */
  462. static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
  463. {
  464. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  465. struct iscsi_tcp_task *tcp_task = task->dd_data;
  466. struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
  467. int datasn = be32_to_cpu(rhdr->datasn);
  468. unsigned total_in_length = scsi_in(task->sc)->length;
  469. iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
  470. if (tcp_conn->in.datalen == 0)
  471. return 0;
  472. if (tcp_task->exp_datasn != datasn) {
  473. debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
  474. __func__, tcp_task->exp_datasn, datasn);
  475. return ISCSI_ERR_DATASN;
  476. }
  477. tcp_task->exp_datasn++;
  478. tcp_task->data_offset = be32_to_cpu(rhdr->offset);
  479. if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
  480. debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
  481. __func__, tcp_task->data_offset,
  482. tcp_conn->in.datalen, total_in_length);
  483. return ISCSI_ERR_DATA_OFFSET;
  484. }
  485. conn->datain_pdus_cnt++;
  486. return 0;
  487. }
  488. /**
  489. * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
  490. * @conn: iscsi connection
  491. * @task: scsi command task
  492. */
  493. static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
  494. {
  495. struct iscsi_session *session = conn->session;
  496. struct iscsi_tcp_task *tcp_task = task->dd_data;
  497. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  498. struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
  499. struct iscsi_r2t_info *r2t;
  500. int r2tsn = be32_to_cpu(rhdr->r2tsn);
  501. int rc;
  502. if (tcp_conn->in.datalen) {
  503. iscsi_conn_printk(KERN_ERR, conn,
  504. "invalid R2t with datalen %d\n",
  505. tcp_conn->in.datalen);
  506. return ISCSI_ERR_DATALEN;
  507. }
508. if (tcp_task->exp_datasn != r2tsn) {
  509. debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
  510. __func__, tcp_task->exp_datasn, r2tsn);
  511. return ISCSI_ERR_R2TSN;
  512. }
  513. /* fill-in new R2T associated with the task */
  514. iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
  515. if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
  516. iscsi_conn_printk(KERN_INFO, conn,
  517. "dropping R2T itt %d in recovery.\n",
  518. task->itt);
  519. return 0;
  520. }
  521. rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
  522. if (!rc) {
  523. iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
  524. "Target has sent more R2Ts than it "
  525. "negotiated for or driver has has leaked.\n");
  526. return ISCSI_ERR_PROTO;
  527. }
  528. r2t->exp_statsn = rhdr->statsn;
  529. r2t->data_length = be32_to_cpu(rhdr->data_length);
  530. if (r2t->data_length == 0) {
  531. iscsi_conn_printk(KERN_ERR, conn,
  532. "invalid R2T with zero data len\n");
  533. __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
  534. sizeof(void*));
  535. return ISCSI_ERR_DATALEN;
  536. }
  537. if (r2t->data_length > session->max_burst)
538. debug_scsi("invalid R2T with data len %u and max burst %u. "
  539. "Attempting to execute request.\n",
  540. r2t->data_length, session->max_burst);
  541. r2t->data_offset = be32_to_cpu(rhdr->data_offset);
  542. if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
  543. iscsi_conn_printk(KERN_ERR, conn,
  544. "invalid R2T with data len %u at offset %u "
  545. "and total length %d\n", r2t->data_length,
  546. r2t->data_offset, scsi_out(task->sc)->length);
  547. __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
  548. sizeof(void*));
  549. return ISCSI_ERR_DATALEN;
  550. }
  551. r2t->ttt = rhdr->ttt; /* no flip */
  552. r2t->datasn = 0;
  553. r2t->sent = 0;
  554. tcp_task->exp_datasn = r2tsn + 1;
  555. __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
  556. conn->r2t_pdus_cnt++;
  557. iscsi_requeue_task(task);
  558. return 0;
  559. }
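/*
 * A valid R2T asks us to send data_length bytes of the command's write
 * buffer starting at data_offset. The filled-in iscsi_r2t_info is pushed
 * onto r2tqueue and the task requeued, so the transmit side
 * (iscsi_tcp_task_xmit() below) will pick it up and emit the solicited
 * Data-Out PDUs.
 */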
  560. /*
  561. * Handle incoming reply to DataIn command
  562. */
  563. static int
  564. iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
  565. struct iscsi_segment *segment)
  566. {
  567. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  568. struct iscsi_hdr *hdr = tcp_conn->in.hdr;
  569. int rc;
  570. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  571. return ISCSI_ERR_DATA_DGST;
  572. /* check for non-exceptional status */
  573. if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
  574. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
  575. if (rc)
  576. return rc;
  577. }
  578. iscsi_tcp_hdr_recv_prep(tcp_conn);
  579. return 0;
  580. }
  581. /**
  582. * iscsi_tcp_hdr_dissect - process PDU header
  583. * @conn: iSCSI connection
  584. * @hdr: PDU header
  585. *
  586. * This function analyzes the header of the PDU received,
  587. * and performs several sanity checks. If the PDU is accompanied
  588. * by data, the receive buffer is set up to copy the incoming data
  589. * to the correct location.
  590. */
  591. static int
  592. iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
  593. {
  594. int rc = 0, opcode, ahslen;
  595. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  596. struct iscsi_task *task;
  597. /* verify PDU length */
  598. tcp_conn->in.datalen = ntoh24(hdr->dlength);
  599. if (tcp_conn->in.datalen > conn->max_recv_dlength) {
  600. iscsi_conn_printk(KERN_ERR, conn,
  601. "iscsi_tcp: datalen %d > %d\n",
  602. tcp_conn->in.datalen, conn->max_recv_dlength);
  603. return ISCSI_ERR_DATALEN;
  604. }
  605. /* Additional header segments. So far, we don't
  606. * process additional headers.
  607. */
  608. ahslen = hdr->hlength << 2;
  609. opcode = hdr->opcode & ISCSI_OPCODE_MASK;
  610. /* verify itt (itt encoding: age+cid+itt) */
  611. rc = iscsi_verify_itt(conn, hdr->itt);
  612. if (rc)
  613. return rc;
  614. debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
  615. opcode, ahslen, tcp_conn->in.datalen);
  616. switch(opcode) {
  617. case ISCSI_OP_SCSI_DATA_IN:
  618. spin_lock(&conn->session->lock);
  619. task = iscsi_itt_to_ctask(conn, hdr->itt);
  620. if (!task)
  621. rc = ISCSI_ERR_BAD_ITT;
  622. else
  623. rc = iscsi_tcp_data_in(conn, task);
  624. if (rc) {
  625. spin_unlock(&conn->session->lock);
  626. break;
  627. }
  628. if (tcp_conn->in.datalen) {
  629. struct iscsi_tcp_task *tcp_task = task->dd_data;
  630. struct hash_desc *rx_hash = NULL;
  631. struct scsi_data_buffer *sdb = scsi_in(task->sc);
  632. /*
  633. * Setup copy of Data-In into the Scsi_Cmnd
  634. * Scatterlist case:
  635. * We set up the iscsi_segment to point to the next
  636. * scatterlist entry to copy to. As we go along,
  637. * we move on to the next scatterlist entry and
  638. * update the digest per-entry.
  639. */
  640. if (conn->datadgst_en &&
  641. !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
  642. rx_hash = &tcp_conn->rx_hash;
  643. debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
  644. "datalen=%d)\n", tcp_conn,
  645. tcp_task->data_offset,
  646. tcp_conn->in.datalen);
  647. rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
  648. sdb->table.sgl,
  649. sdb->table.nents,
  650. tcp_task->data_offset,
  651. tcp_conn->in.datalen,
  652. iscsi_tcp_process_data_in,
  653. rx_hash);
  654. spin_unlock(&conn->session->lock);
  655. return rc;
  656. }
  657. rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
  658. spin_unlock(&conn->session->lock);
  659. break;
  660. case ISCSI_OP_SCSI_CMD_RSP:
  661. if (tcp_conn->in.datalen) {
  662. iscsi_tcp_data_recv_prep(tcp_conn);
  663. return 0;
  664. }
  665. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  666. break;
  667. case ISCSI_OP_R2T:
  668. spin_lock(&conn->session->lock);
  669. task = iscsi_itt_to_ctask(conn, hdr->itt);
  670. if (!task)
  671. rc = ISCSI_ERR_BAD_ITT;
  672. else if (ahslen)
  673. rc = ISCSI_ERR_AHSLEN;
  674. else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
  675. rc = iscsi_tcp_r2t_rsp(conn, task);
  676. else
  677. rc = ISCSI_ERR_PROTO;
  678. spin_unlock(&conn->session->lock);
  679. break;
  680. case ISCSI_OP_LOGIN_RSP:
  681. case ISCSI_OP_TEXT_RSP:
  682. case ISCSI_OP_REJECT:
  683. case ISCSI_OP_ASYNC_EVENT:
  684. /*
  685. * It is possible that we could get a PDU with a buffer larger
  686. * than 8K, but there are no targets that currently do this.
  687. * For now we fail until we find a vendor that needs it
  688. */
  689. if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
  690. iscsi_conn_printk(KERN_ERR, conn,
  691. "iscsi_tcp: received buffer of "
  692. "len %u but conn buffer is only %u "
  693. "(opcode %0x)\n",
  694. tcp_conn->in.datalen,
  695. ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
  696. rc = ISCSI_ERR_PROTO;
  697. break;
  698. }
  699. /* If there's data coming in with the response,
  700. * receive it to the connection's buffer.
  701. */
  702. if (tcp_conn->in.datalen) {
  703. iscsi_tcp_data_recv_prep(tcp_conn);
  704. return 0;
  705. }
  706. /* fall through */
  707. case ISCSI_OP_LOGOUT_RSP:
  708. case ISCSI_OP_NOOP_IN:
  709. case ISCSI_OP_SCSI_TMFUNC_RSP:
  710. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  711. break;
  712. default:
  713. rc = ISCSI_ERR_BAD_OPCODE;
  714. break;
  715. }
  716. if (rc == 0) {
  717. /* Anything that comes with data should have
  718. * been handled above. */
  719. if (tcp_conn->in.datalen)
  720. return ISCSI_ERR_PROTO;
  721. iscsi_tcp_hdr_recv_prep(tcp_conn);
  722. }
  723. return rc;
  724. }
  725. /**
  726. * iscsi_tcp_hdr_recv_done - process PDU header
  727. *
  728. * This is the callback invoked when the PDU header has
  729. * been received. If the header is followed by additional
  730. * header segments, we go back for more data.
  731. */
  732. static int
  733. iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  734. struct iscsi_segment *segment)
  735. {
  736. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  737. struct iscsi_hdr *hdr;
  738. /* Check if there are additional header segments
  739. * *prior* to computing the digest, because we
  740. * may need to go back to the caller for more.
  741. */
  742. hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
  743. if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
  744. /* Bump the header length - the caller will
  745. * just loop around and get the AHS for us, and
  746. * call again. */
  747. unsigned int ahslen = hdr->hlength << 2;
  748. /* Make sure we don't overflow */
  749. if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
  750. return ISCSI_ERR_AHSLEN;
  751. segment->total_size += ahslen;
  752. segment->size += ahslen;
  753. return 0;
  754. }
  755. /* We're done processing the header. See if we're doing
  756. * header digests; if so, set up the recv_digest buffer
  757. * and go back for more. */
  758. if (conn->hdrdgst_en) {
  759. if (segment->digest_len == 0) {
  760. /*
  761. * Even if we offload the digest processing we
  762. * splice it in so we can increment the skb/segment
  763. * counters in preparation for the data segment.
  764. */
  765. iscsi_tcp_segment_splice_digest(segment,
  766. segment->recv_digest);
  767. return 0;
  768. }
  769. if (!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
  770. iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
  771. segment->total_copied - ISCSI_DIGEST_SIZE,
  772. segment->digest);
  773. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  774. return ISCSI_ERR_HDR_DGST;
  775. }
  776. }
  777. tcp_conn->in.hdr = hdr;
  778. return iscsi_tcp_hdr_dissect(conn, hdr);
  779. }
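/*
 * Note the two-pass structure above: on the first pass only the 48-byte
 * basic header segment has been read. If hlength announces additional
 * header segments, the segment is simply grown by ahslen and we return 0
 * so the caller loops back for the rest before any digest is checked or
 * the PDU dissected.
 */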
  780. inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
  781. {
  782. return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
  783. }
  784. enum {
  785. ISCSI_TCP_SEGMENT_DONE, /* curr seg has been processed */
  786. ISCSI_TCP_SKB_DONE, /* skb is out of data */
  787. ISCSI_TCP_CONN_ERR, /* iscsi layer has fired a conn err */
  788. ISCSI_TCP_SUSPENDED, /* conn is suspended */
  789. };
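/*
 * These values are handed back through the *status argument of
 * iscsi_tcp_recv_skb(): SEGMENT_DONE means the current segment finished
 * and the skb may still hold bytes for the next one, SKB_DONE means the
 * skb ran dry mid-segment, CONN_ERR means iscsi_conn_failure() has
 * already been called, and SUSPENDED means receive is shut off (as done
 * during connection stop) and the bytes were ignored.
 */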
  790. /**
  791. * iscsi_tcp_recv_skb - Process skb
  792. * @conn: iscsi connection
  793. * @skb: network buffer with header and/or data segment
  794. * @offset: offset in skb
795. * @offloaded: bool indicating if transfer was offloaded
  796. */
  797. int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
  798. unsigned int offset, bool offloaded, int *status)
  799. {
  800. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  801. struct iscsi_segment *segment = &tcp_conn->in.segment;
  802. struct skb_seq_state seq;
  803. unsigned int consumed = 0;
  804. int rc = 0;
  805. debug_tcp("in %d bytes\n", skb->len - offset);
  806. if (unlikely(conn->suspend_rx)) {
  807. debug_tcp("conn %d Rx suspended!\n", conn->id);
  808. *status = ISCSI_TCP_SUSPENDED;
  809. return 0;
  810. }
  811. if (offloaded) {
  812. segment->total_copied = segment->total_size;
  813. goto segment_done;
  814. }
  815. skb_prepare_seq_read(skb, offset, skb->len, &seq);
  816. while (1) {
  817. unsigned int avail;
  818. const u8 *ptr;
  819. avail = skb_seq_read(consumed, &ptr, &seq);
  820. if (avail == 0) {
  821. debug_tcp("no more data avail. Consumed %d\n",
  822. consumed);
  823. *status = ISCSI_TCP_SKB_DONE;
  824. skb_abort_seq_read(&seq);
  825. goto skb_done;
  826. }
  827. BUG_ON(segment->copied >= segment->size);
  828. debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
  829. rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
  830. BUG_ON(rc == 0);
  831. consumed += rc;
  832. if (segment->total_copied >= segment->total_size) {
  833. skb_abort_seq_read(&seq);
  834. goto segment_done;
  835. }
  836. }
  837. segment_done:
  838. *status = ISCSI_TCP_SEGMENT_DONE;
  839. debug_tcp("segment done\n");
  840. rc = segment->done(tcp_conn, segment);
  841. if (rc != 0) {
  842. *status = ISCSI_TCP_CONN_ERR;
  843. debug_tcp("Error receiving PDU, errno=%d\n", rc);
  844. iscsi_conn_failure(conn, rc);
  845. return 0;
  846. }
847. /* The done() function sets up the next segment. */
  848. skb_done:
  849. conn->rxdata_octets += consumed;
  850. return consumed;
  851. }
  852. EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
  853. /**
  854. * iscsi_tcp_recv - TCP receive in sendfile fashion
  855. * @rd_desc: read descriptor
  856. * @skb: socket buffer
  857. * @offset: offset in skb
  858. * @len: skb->len - offset
  859. **/
  860. static int
  861. iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  862. unsigned int offset, size_t len)
  863. {
  864. struct iscsi_conn *conn = rd_desc->arg.data;
  865. unsigned int consumed, total_consumed = 0;
  866. int status;
  867. debug_tcp("in %d bytes\n", skb->len - offset);
  868. do {
  869. status = 0;
  870. consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
  871. offset += consumed;
  872. total_consumed += consumed;
  873. } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
  874. debug_tcp("read %d bytes status %d\n", skb->len - offset, status);
  875. return total_consumed;
  876. }
  877. static void
  878. iscsi_tcp_data_ready(struct sock *sk, int flag)
  879. {
  880. struct iscsi_conn *conn = sk->sk_user_data;
  881. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  882. read_descriptor_t rd_desc;
  883. read_lock(&sk->sk_callback_lock);
  884. /*
  885. * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
  886. * We set count to 1 because we want the network layer to
  887. * hand us all the skbs that are available. iscsi_tcp_recv
  888. * handled pdus that cross buffers or pdus that still need data.
  889. */
  890. rd_desc.arg.data = conn;
  891. rd_desc.count = 1;
  892. tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
  893. read_unlock(&sk->sk_callback_lock);
  894. /* If we had to (atomically) map a highmem page,
  895. * unmap it now. */
  896. iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
  897. }
  898. static void
  899. iscsi_tcp_state_change(struct sock *sk)
  900. {
  901. struct iscsi_tcp_conn *tcp_conn;
  902. struct iscsi_conn *conn;
  903. struct iscsi_session *session;
  904. void (*old_state_change)(struct sock *);
  905. read_lock(&sk->sk_callback_lock);
  906. conn = (struct iscsi_conn*)sk->sk_user_data;
  907. session = conn->session;
  908. if ((sk->sk_state == TCP_CLOSE_WAIT ||
  909. sk->sk_state == TCP_CLOSE) &&
  910. !atomic_read(&sk->sk_rmem_alloc)) {
  911. debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
  912. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  913. }
  914. tcp_conn = conn->dd_data;
  915. old_state_change = tcp_conn->old_state_change;
  916. read_unlock(&sk->sk_callback_lock);
  917. old_state_change(sk);
  918. }
  919. /**
920. * iscsi_tcp_write_space - Called when more output buffer space is available
  921. * @sk: socket space is available for
  922. **/
  923. static void iscsi_tcp_write_space(struct sock *sk)
  924. {
  925. struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
  926. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  927. tcp_conn->old_write_space(sk);
  928. debug_tcp("iscsi_write_space: cid %d\n", conn->id);
  929. scsi_queue_work(conn->session->host, &conn->xmitwork);
  930. }
  931. static void iscsi_tcp_conn_set_callbacks(struct iscsi_conn *conn)
  932. {
  933. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  934. struct sock *sk = tcp_conn->sock->sk;
  935. /* assign new callbacks */
  936. write_lock_bh(&sk->sk_callback_lock);
  937. sk->sk_user_data = conn;
  938. tcp_conn->old_data_ready = sk->sk_data_ready;
  939. tcp_conn->old_state_change = sk->sk_state_change;
  940. tcp_conn->old_write_space = sk->sk_write_space;
  941. sk->sk_data_ready = iscsi_tcp_data_ready;
  942. sk->sk_state_change = iscsi_tcp_state_change;
  943. sk->sk_write_space = iscsi_tcp_write_space;
  944. write_unlock_bh(&sk->sk_callback_lock);
  945. }
  946. static void iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
  947. {
  948. struct sock *sk = tcp_conn->sock->sk;
949. /* restore socket callbacks, see also: iscsi_tcp_conn_set_callbacks() */
  950. write_lock_bh(&sk->sk_callback_lock);
  951. sk->sk_user_data = NULL;
  952. sk->sk_data_ready = tcp_conn->old_data_ready;
  953. sk->sk_state_change = tcp_conn->old_state_change;
  954. sk->sk_write_space = tcp_conn->old_write_space;
  955. sk->sk_no_check = 0;
  956. write_unlock_bh(&sk->sk_callback_lock);
  957. }
  958. /**
  959. * iscsi_tcp_xmit - TCP transmit
  960. **/
  961. static int iscsi_tcp_xmit(struct iscsi_conn *conn)
  962. {
  963. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  964. struct iscsi_segment *segment = &tcp_conn->out.segment;
  965. unsigned int consumed = 0;
  966. int rc = 0;
  967. while (1) {
  968. rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
  969. if (rc < 0) {
  970. rc = ISCSI_ERR_XMIT_FAILED;
  971. goto error;
  972. }
  973. if (rc == 0)
  974. break;
  975. consumed += rc;
  976. if (segment->total_copied >= segment->total_size) {
  977. if (segment->done != NULL) {
  978. rc = segment->done(tcp_conn, segment);
  979. if (rc != 0)
  980. goto error;
  981. }
  982. }
  983. }
  984. debug_tcp("xmit %d bytes\n", consumed);
  985. conn->txdata_octets += consumed;
  986. return consumed;
  987. error:
  988. /* Transmit error. We could initiate error recovery
  989. * here. */
  990. debug_tcp("Error sending PDU, errno=%d\n", rc);
  991. iscsi_conn_failure(conn, rc);
  992. return -EIO;
  993. }
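/*
 * The loop above keeps pushing the current out.segment until the network
 * layer stops accepting data (xmit_segment returns 0) or the segment
 * completes; in the latter case its done() callback chains the next
 * piece, e.g. iscsi_tcp_send_hdr_done() swaps in the queued data segment
 * once the header has gone out.
 */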
  994. /**
  995. * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
  996. */
  997. static inline int
  998. iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
  999. {
  1000. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1001. struct iscsi_segment *segment = &tcp_conn->out.segment;
  1002. return segment->total_copied - segment->total_size;
  1003. }
  1004. static int iscsi_tcp_flush(struct iscsi_task *task)
  1005. {
  1006. struct iscsi_conn *conn = task->conn;
  1007. int rc;
  1008. while (iscsi_tcp_xmit_qlen(conn)) {
  1009. rc = iscsi_tcp_xmit(conn);
  1010. if (rc == 0)
  1011. return -EAGAIN;
  1012. if (rc < 0)
  1013. return rc;
  1014. }
  1015. return 0;
  1016. }
  1017. /*
  1018. * This is called when we're done sending the header.
  1019. * Simply copy the data_segment to the send segment, and return.
  1020. */
  1021. static int
  1022. iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
  1023. struct iscsi_segment *segment)
  1024. {
  1025. tcp_conn->out.segment = tcp_conn->out.data_segment;
  1026. debug_tcp("Header done. Next segment size %u total_size %u\n",
  1027. tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
  1028. return 0;
  1029. }
  1030. static void
  1031. iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
  1032. {
  1033. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1034. debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
  1035. conn->hdrdgst_en? ", digest enabled" : "");
  1036. /* Clear the data segment - needs to be filled in by the
  1037. * caller using iscsi_tcp_send_data_prep() */
  1038. memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
  1039. /* If header digest is enabled, compute the CRC and
  1040. * place the digest into the same buffer. We make
  1041. * sure that both iscsi_tcp_task and mtask have
  1042. * sufficient room.
  1043. */
  1044. if (conn->hdrdgst_en) {
  1045. iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
  1046. hdr + hdrlen);
  1047. hdrlen += ISCSI_DIGEST_SIZE;
  1048. }
  1049. /* Remember header pointer for later, when we need
  1050. * to decide whether there's a payload to go along
  1051. * with the header. */
  1052. tcp_conn->out.hdr = hdr;
  1053. iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
  1054. iscsi_tcp_send_hdr_done, NULL);
  1055. }
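/*
 * When HeaderDigest is in use the CRC is written straight after the
 * header in the same buffer and hdrlen grows to cover it, which is why
 * iscsi_tcp_pdu_alloc() below leaves ISCSI_DIGEST_SIZE bytes of slack
 * when it sets task->hdr_max.
 */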
  1056. /*
  1057. * Prepare the send buffer for the payload data.
  1058. * Padding and checksumming will all be taken care
  1059. * of by the iscsi_segment routines.
  1060. */
  1061. static int
  1062. iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
  1063. unsigned int count, unsigned int offset,
  1064. unsigned int len)
  1065. {
  1066. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1067. struct hash_desc *tx_hash = NULL;
  1068. unsigned int hdr_spec_len;
  1069. debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
  1070. tcp_conn, offset, len,
  1071. conn->datadgst_en? ", digest enabled" : "");
  1072. /* Make sure the datalen matches what the caller
  1073. said he would send. */
  1074. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1075. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1076. if (conn->datadgst_en)
  1077. tx_hash = &tcp_conn->tx_hash;
  1078. return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
  1079. sg, count, offset, len,
  1080. NULL, tx_hash);
  1081. }
  1082. static void
  1083. iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  1084. size_t len)
  1085. {
  1086. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1087. struct hash_desc *tx_hash = NULL;
  1088. unsigned int hdr_spec_len;
  1089. debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
  1090. conn->datadgst_en? ", digest enabled" : "");
  1091. /* Make sure the datalen matches what the caller
  1092. said he would send. */
  1093. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1094. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1095. if (conn->datadgst_en)
  1096. tx_hash = &tcp_conn->tx_hash;
  1097. iscsi_segment_init_linear(&tcp_conn->out.data_segment,
  1098. data, len, NULL, tx_hash);
  1099. }
  1100. static int iscsi_tcp_pdu_init(struct iscsi_task *task,
  1101. unsigned int offset, unsigned int count)
  1102. {
  1103. struct iscsi_conn *conn = task->conn;
  1104. int err = 0;
  1105. iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
  1106. if (!count)
  1107. return 0;
  1108. if (!task->sc)
  1109. iscsi_tcp_send_linear_data_prepare(conn, task->data, count);
  1110. else {
  1111. struct scsi_data_buffer *sdb = scsi_out(task->sc);
  1112. err = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
  1113. sdb->table.nents, offset, count);
  1114. }
  1115. if (err) {
  1116. iscsi_conn_failure(conn, err);
  1117. return -EIO;
  1118. }
  1119. return 0;
  1120. }
  1121. static int iscsi_tcp_pdu_alloc(struct iscsi_task *task)
  1122. {
  1123. struct iscsi_tcp_task *tcp_task = task->dd_data;
  1124. task->hdr = &tcp_task->hdr.hdrbuf;
  1125. task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
  1126. return 0;
  1127. }
  1128. /**
1129. * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  1130. * @conn: iscsi connection
  1131. * @task: scsi command task
  1132. * @sc: scsi command
  1133. **/
  1134. static int iscsi_tcp_task_init(struct iscsi_task *task)
  1135. {
  1136. struct iscsi_tcp_task *tcp_task = task->dd_data;
  1137. struct iscsi_conn *conn = task->conn;
  1138. struct scsi_cmnd *sc = task->sc;
  1139. int err;
  1140. if (!sc) {
  1141. /*
  1142. * mgmt tasks do not have a scatterlist since they come
  1143. * in from the iscsi interface.
  1144. */
  1145. debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
  1146. task->itt);
  1147. return conn->session->tt->init_pdu(task, 0, task->data_count);
  1148. }
  1149. BUG_ON(__kfifo_len(tcp_task->r2tqueue));
  1150. tcp_task->exp_datasn = 0;
  1151. /* Prepare PDU, optionally w/ immediate data */
  1152. debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
  1153. conn->id, task->itt, task->imm_count,
  1154. task->unsol_r2t.data_length);
  1155. err = conn->session->tt->init_pdu(task, 0, task->imm_count);
  1156. if (err)
  1157. return err;
  1158. task->imm_count = 0;
  1159. return 0;
  1160. }
  1161. static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
  1162. {
  1163. struct iscsi_session *session = task->conn->session;
  1164. struct iscsi_tcp_task *tcp_task = task->dd_data;
  1165. struct iscsi_r2t_info *r2t = NULL;
  1166. if (iscsi_task_has_unsol_data(task))
  1167. r2t = &task->unsol_r2t;
  1168. else {
  1169. spin_lock_bh(&session->lock);
  1170. if (tcp_task->r2t) {
  1171. r2t = tcp_task->r2t;
  1172. /* Continue with this R2T? */
  1173. if (r2t->data_length <= r2t->sent) {
  1174. debug_scsi(" done with r2t %p\n", r2t);
  1175. __kfifo_put(tcp_task->r2tpool.queue,
  1176. (void *)&tcp_task->r2t,
  1177. sizeof(void *));
  1178. tcp_task->r2t = r2t = NULL;
  1179. }
  1180. }
  1181. if (r2t == NULL) {
  1182. __kfifo_get(tcp_task->r2tqueue,
  1183. (void *)&tcp_task->r2t, sizeof(void *));
  1184. r2t = tcp_task->r2t;
  1185. }
  1186. spin_unlock_bh(&session->lock);
  1187. }
  1188. return r2t;
  1189. }
  1190. /*
  1191. * iscsi_tcp_task_xmit - xmit normal PDU task
  1192. * @task: iscsi command task
  1193. *
1194. * We're expected to return 0 when everything was transmitted successfully,
  1195. * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  1196. * of error.
  1197. */
  1198. static int iscsi_tcp_task_xmit(struct iscsi_task *task)
  1199. {
  1200. struct iscsi_conn *conn = task->conn;
  1201. struct iscsi_session *session = conn->session;
  1202. struct iscsi_r2t_info *r2t;
  1203. int rc = 0;
  1204. flush:
  1205. /* Flush any pending data first. */
  1206. rc = session->tt->xmit_pdu(task);
  1207. if (rc < 0)
  1208. return rc;
  1209. /* mgmt command */
  1210. if (!task->sc) {
  1211. if (task->hdr->itt == RESERVED_ITT)
  1212. iscsi_put_task(task);
  1213. return 0;
  1214. }
  1215. /* Are we done already? */
  1216. if (task->sc->sc_data_direction != DMA_TO_DEVICE)
  1217. return 0;
  1218. r2t = iscsi_tcp_get_curr_r2t(task);
  1219. if (r2t == NULL) {
  1220. /* Waiting for more R2Ts to arrive. */
  1221. debug_tcp("no R2Ts yet\n");
  1222. return 0;
  1223. }
  1224. rc = conn->session->tt->alloc_pdu(task);
  1225. if (rc)
  1226. return rc;
  1227. iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
  1228. debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
  1229. r2t, r2t->datasn - 1, task->hdr->itt,
  1230. r2t->data_offset + r2t->sent, r2t->data_count);
  1231. rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
  1232. r2t->data_count);
  1233. if (rc)
  1234. return rc;
  1235. r2t->sent += r2t->data_count;
  1236. goto flush;
  1237. }
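/*
 * Rough shape of the solicited write path above: flush whatever is
 * queued, and while an R2T (or unsolicited data) is pending, allocate a
 * Data-Out header, fill it from the R2T with iscsi_prep_data_out_pdu(),
 * queue the next data_count bytes of the buffer, advance r2t->sent and
 * jump back to flush. Once r2t->sent reaches data_length,
 * iscsi_tcp_get_curr_r2t() returns the descriptor to the pool.
 */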
  1238. static struct iscsi_cls_conn *
  1239. iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
  1240. {
  1241. struct iscsi_conn *conn;
  1242. struct iscsi_cls_conn *cls_conn;
  1243. struct iscsi_tcp_conn *tcp_conn;
  1244. cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
  1245. if (!cls_conn)
  1246. return NULL;
  1247. conn = cls_conn->dd_data;
  1248. /*
  1249. * due to strange issues with iser these are not set
  1250. * in iscsi_conn_setup
  1251. */
  1252. conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
  1253. tcp_conn = conn->dd_data;
  1254. tcp_conn->iscsi_conn = conn;
  1255. tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1256. CRYPTO_ALG_ASYNC);
  1257. tcp_conn->tx_hash.flags = 0;
  1258. if (IS_ERR(tcp_conn->tx_hash.tfm))
  1259. goto free_conn;
  1260. tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1261. CRYPTO_ALG_ASYNC);
  1262. tcp_conn->rx_hash.flags = 0;
  1263. if (IS_ERR(tcp_conn->rx_hash.tfm))
  1264. goto free_tx_tfm;
  1265. return cls_conn;
  1266. free_tx_tfm:
  1267. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1268. free_conn:
  1269. iscsi_conn_printk(KERN_ERR, conn,
  1270. "Could not create connection due to crc32c "
  1271. "loading error. Make sure the crc32c "
  1272. "module is built as a module or into the "
  1273. "kernel\n");
  1274. iscsi_conn_teardown(cls_conn);
  1275. return NULL;
  1276. }
  1277. static void
  1278. iscsi_tcp_release_conn(struct iscsi_conn *conn)
  1279. {
  1280. struct iscsi_session *session = conn->session;
  1281. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1282. struct socket *sock = tcp_conn->sock;
  1283. if (!sock)
  1284. return;
  1285. sock_hold(sock->sk);
  1286. iscsi_conn_restore_callbacks(tcp_conn);
  1287. sock_put(sock->sk);
  1288. spin_lock_bh(&session->lock);
  1289. tcp_conn->sock = NULL;
  1290. spin_unlock_bh(&session->lock);
  1291. sockfd_put(sock);
  1292. }
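/*
 * The sock_hold()/sock_put() pair keeps the struct sock alive while the
 * original callbacks are restored, and sockfd_put() drops the file
 * reference taken by sockfd_lookup() in iscsi_tcp_conn_bind(). Clearing
 * tcp_conn->sock under the session lock ensures other paths see either a
 * valid socket or NULL, never a half-torn-down one.
 */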
  1293. static void
  1294. iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
  1295. {
  1296. struct iscsi_conn *conn = cls_conn->dd_data;
  1297. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1298. iscsi_tcp_release_conn(conn);
  1299. if (tcp_conn->tx_hash.tfm)
  1300. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1301. if (tcp_conn->rx_hash.tfm)
  1302. crypto_free_hash(tcp_conn->rx_hash.tfm);
  1303. iscsi_conn_teardown(cls_conn);
  1304. }
  1305. static void
  1306. iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
  1307. {
  1308. struct iscsi_conn *conn = cls_conn->dd_data;
  1309. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1310. /* userspace may have goofed up and not bound us */
  1311. if (!tcp_conn->sock)
  1312. return;
  1313. /*
  1314. * Make sure our recv side is stopped.
  1315. * Older tools called conn stop before ep_disconnect
  1316. * so IO could still be coming in.
  1317. */
  1318. write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
  1319. set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
  1320. write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
  1321. iscsi_conn_stop(cls_conn, flag);
  1322. iscsi_tcp_release_conn(conn);
  1323. }
  1324. static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
  1325. char *buf, int *port,
  1326. int (*getname)(struct socket *, struct sockaddr *,
  1327. int *addrlen))
  1328. {
  1329. struct sockaddr_storage *addr;
  1330. struct sockaddr_in6 *sin6;
  1331. struct sockaddr_in *sin;
  1332. int rc = 0, len;
  1333. addr = kmalloc(sizeof(*addr), GFP_KERNEL);
  1334. if (!addr)
  1335. return -ENOMEM;
  1336. if (getname(sock, (struct sockaddr *) addr, &len)) {
  1337. rc = -ENODEV;
  1338. goto free_addr;
  1339. }
  1340. switch (addr->ss_family) {
  1341. case AF_INET:
  1342. sin = (struct sockaddr_in *)addr;
  1343. spin_lock_bh(&conn->session->lock);
  1344. sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
  1345. *port = be16_to_cpu(sin->sin_port);
  1346. spin_unlock_bh(&conn->session->lock);
  1347. break;
  1348. case AF_INET6:
  1349. sin6 = (struct sockaddr_in6 *)addr;
  1350. spin_lock_bh(&conn->session->lock);
  1351. sprintf(buf, "%pI6", &sin6->sin6_addr);
  1352. *port = be16_to_cpu(sin6->sin6_port);
  1353. spin_unlock_bh(&conn->session->lock);
  1354. break;
  1355. }
  1356. free_addr:
  1357. kfree(addr);
  1358. return rc;
  1359. }
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_host *ihost = shost_priv(shost);
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* look up the existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	/*
	 * copy these values now because if we drop the session
	 * userspace may still want to query the values since we will
	 * be using them for the reconnect
	 */
	err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
				 &conn->portal_port, kernel_getpeername);
	if (err)
		goto free_socket;

	err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
				 &ihost->local_port, kernel_getsockname);
	if (err)
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* set up socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	iscsi_tcp_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;

	/* set receive state machine into initial state */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}
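
/*
 * iscsi_r2tpool_alloc - allocate the per-task R2T pool and xmit kfifo.
 *
 * Each command slot gets a pool of 2 * max_r2t R2T descriptors plus a
 * kfifo used as the R2T transmit queue.  On failure, everything
 * allocated for earlier slots is rolled back.
 */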
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_task *task = session->cmds[cmd_i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		/*
		 * pre-allocate twice as many r2ts to handle the race where
		 * the target acks DataOut faster than data_xmit() can
		 * replenish the r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_task->r2tpool,
				    session->max_r2t * 2, NULL,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		tcp_task->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_task->r2tpool);
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		kfifo_free(tcp_task->r2tqueue);
		iscsi_pool_free(&tcp_task->r2tpool);
	}
	return -ENOMEM;
}
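
/*
 * iscsi_r2tpool_free - undo iscsi_r2tpool_alloc() for every command slot.
 */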
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct iscsi_tcp_task *tcp_task = task->dd_data;

		kfifo_free(tcp_task->r2tqueue);
		iscsi_pool_free(&tcp_task->r2tpool);
	}
}
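
/*
 * iscsi_conn_set_param - apply a connection parameter from userspace.
 *
 * Most parameters are handed straight to libiscsi.  Enabling data
 * digests switches the transmit path from sendpage to sock_no_sendpage,
 * and changing MAX_R2T (which must be a power of two) forces the
 * per-task R2T pools to be reallocated at the new size.
 */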
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}
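
/*
 * iscsi_tcp_conn_get_param - report TCP-specific connection parameters.
 *
 * The cached portal address and port are read under the session lock;
 * everything else is handled by libiscsi.
 */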
static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%hu\n", conn->portal_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%s\n", conn->portal_address);
		spin_unlock_bh(&conn->session->lock);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}
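
/*
 * iscsi_conn_get_stats - fill in the statistics reported to userspace,
 * including the driver-specific sendpage-failure and discontiguous-header
 * counters exported as custom stats.
 */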
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}
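
/*
 * iscsi_tcp_session_create - allocate a virtual host and session.
 *
 * Software iSCSI is not endpoint based, so @ep must be NULL here; the
 * connection is later bound to a socket fd in iscsi_tcp_conn_bind().
 * A scsi_host is allocated per session, then the session and the
 * per-task R2T pools are set up on top of it.
 */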
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			 uint16_t qdepth, uint32_t initial_cmdsn,
			 uint32_t *hostno)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_tcp_scsi_transport;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;
	*hostno = shost->host_no;

	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
					  sizeof(struct iscsi_tcp_task),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}
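
/*
 * iscsi_tcp_session_destroy - tear down in the reverse order of creation:
 * R2T pools, session, then the virtual host.
 */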
static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}
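
/*
 * iscsi_tcp_slave_configure - per-device setup: allow I/O buffers anywhere
 * in memory and clear the DMA alignment requirement, presumably because
 * the data is copied through the network stack rather than DMAed directly.
 */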
static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
{
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(sdev->request_queue, 0);
	return 0;
}

static struct scsi_host_template iscsi_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= 4096,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler= iscsi_eh_device_reset,
	.eh_target_reset_handler= iscsi_eh_target_reset,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_configure	= iscsi_tcp_slave_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
};
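
/*
 * iscsi_tcp_transport - transport template registered with the iSCSI
 * class.  param_mask and host_param_mask advertise which parameters
 * userspace is allowed to set or query through this driver.
 */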
static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				  ISCSI_HOST_INITIATOR_NAME |
				  ISCSI_HOST_NETDEV_NAME,
	/* session management */
	.create_session		= iscsi_tcp_session_create,
	.destroy_session	= iscsi_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_tcp_conn_create,
	.bind_conn		= iscsi_tcp_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_conn_param		= iscsi_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
	/* iscsi task/cmd helpers */
	.init_task		= iscsi_tcp_task_init,
	.xmit_task		= iscsi_tcp_task_xmit,
	.cleanup_task		= iscsi_tcp_cleanup_task,
	/* low level pdu helpers */
	.xmit_pdu		= iscsi_tcp_flush,
	.init_pdu		= iscsi_tcp_pdu_init,
	.alloc_pdu		= iscsi_tcp_pdu_alloc,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
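
/*
 * Module init/exit: validate the max_lun module parameter and register
 * or unregister the transport with the iSCSI class.
 */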
static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}

	iscsi_tcp_scsi_transport = iscsi_register_transport(
							&iscsi_tcp_transport);
	if (!iscsi_tcp_scsi_transport)
		return -ENODEV;

	return 0;
}

static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);