iscsi_tcp.c
  1. /*
  2. * iSCSI Initiator over TCP/IP Data-Path
  3. *
  4. * Copyright (C) 2004 Dmitry Yusupov
  5. * Copyright (C) 2004 Alex Aizman
  6. * Copyright (C) 2005 - 2006 Mike Christie
  7. * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
  8. * maintained by open-iscsi@googlegroups.com
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published
  12. * by the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * See the file COPYING included with this distribution for more details.
  21. *
  22. * Credits:
  23. * Christoph Hellwig
  24. * FUJITA Tomonori
  25. * Arne Redlich
  26. * Zhenyu Wang
  27. */
  28. #include <linux/types.h>
  29. #include <linux/list.h>
  30. #include <linux/inet.h>
  31. #include <linux/file.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/crypto.h>
  34. #include <linux/delay.h>
  35. #include <linux/kfifo.h>
  36. #include <linux/scatterlist.h>
  37. #include <net/tcp.h>
  38. #include <scsi/scsi_cmnd.h>
  39. #include <scsi/scsi_device.h>
  40. #include <scsi/scsi_host.h>
  41. #include <scsi/scsi.h>
  42. #include <scsi/scsi_transport_iscsi.h>
  43. #include "iscsi_tcp.h"
  44. MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
  45. "Alex Aizman <itn780@yahoo.com>");
  46. MODULE_DESCRIPTION("iSCSI/TCP data-path");
  47. MODULE_LICENSE("GPL");
  48. #undef DEBUG_TCP
  49. #define DEBUG_ASSERT
  50. #ifdef DEBUG_TCP
  51. #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
  52. #else
  53. #define debug_tcp(fmt...)
  54. #endif
  55. #ifndef DEBUG_ASSERT
  56. #ifdef BUG_ON
  57. #undef BUG_ON
  58. #endif
  59. #define BUG_ON(expr)
  60. #endif
  61. static unsigned int iscsi_max_lun = 512;
  62. module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
  63. static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  64. struct iscsi_segment *segment);
  65. /*
  66. * Scatterlist handling: inside the iscsi_segment, we
  67. * remember an index into the scatterlist, and set data/size
  68. * to the current scatterlist entry. For highmem pages, we
  69. * kmap as needed.
  70. *
  71. * Note that the page is unmapped when we return from
  72. * TCP's data_ready handler, so we may end up mapping and
  73. * unmapping the same page repeatedly. The whole reason
  74. * for this is that we shouldn't keep the page mapped
  75. * outside the softirq.
  76. */
  77. /**
  78. * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
  79. * @segment: the buffer object
  80. * @sg: scatterlist
  81. * @offset: byte offset into that sg entry
  82. *
  83. * This function sets up the segment so that subsequent
  84. * data is copied to the indicated sg entry, at the given
  85. * offset.
  86. */
  87. static inline void
  88. iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
  89. struct scatterlist *sg, unsigned int offset)
  90. {
  91. segment->sg = sg;
  92. segment->sg_offset = offset;
  93. segment->size = min(sg->length - offset,
  94. segment->total_size - segment->total_copied);
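/* size now covers whichever is smaller: the bytes left in this sg entry
 * or the bytes still missing for the segment as a whole; data is cleared
 * so the page will be (re)mapped on first use. */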
  95. segment->data = NULL;
  96. }
  97. /**
  98. * iscsi_tcp_segment_map - map the current S/G page
  99. * @segment: iscsi_segment
  100. * @recv: 1 if called from recv path
  101. *
  102. * We only need to possibly kmap data if scatter lists are being used,
  103. * because the iscsi passthrough and internal IO paths will never use high
  104. * mem pages.
  105. */
  106. static inline void
  107. iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
  108. {
  109. struct scatterlist *sg;
  110. if (segment->data != NULL || !segment->sg)
  111. return;
  112. sg = segment->sg;
  113. BUG_ON(segment->sg_mapped);
  114. BUG_ON(sg->length == 0);
  115. /*
  116. * If the page count is greater than one it is ok to send
  117. * to the network layer's zero copy send path. If not we
  118. * have to go the slow sendmsg path. We always map for the
  119. * recv path.
  120. */
  121. if (page_count(sg_page(sg)) >= 1 && !recv)
  122. return;
  123. debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
  124. segment);
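/* kmap_atomic() because this may run in softirq context (the socket's
 * data_ready callback), where we must not sleep; the page is unmapped
 * again before control leaves the handler (see the comment at the top
 * of this file). */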
  125. segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
  126. segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
  127. }
  128. static inline void
  129. iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
  130. {
  131. debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
  132. if (segment->sg_mapped) {
  133. debug_tcp("iscsi_tcp_segment_unmap valid\n");
  134. kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
  135. segment->sg_mapped = NULL;
  136. segment->data = NULL;
  137. }
  138. }
  139. /*
  140. * Splice the digest buffer into the buffer
  141. */
  142. static inline void
  143. iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
  144. {
  145. segment->data = digest;
  146. segment->digest_len = ISCSI_DIGEST_SIZE;
  147. segment->total_size += ISCSI_DIGEST_SIZE;
  148. segment->size = ISCSI_DIGEST_SIZE;
  149. segment->copied = 0;
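/* Clearing sg and hash below makes the spliced digest behave like a
 * plain linear buffer and keeps the digest bytes themselves out of the
 * running CRC. */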
  150. segment->sg = NULL;
  151. segment->hash = NULL;
  152. }
  153. /**
  154. * iscsi_tcp_segment_done - check whether the segment is complete
  155. * @segment: iscsi segment to check
156. * @recv: set to one if this is called from the recv path
  157. * @copied: number of bytes copied
  158. *
  159. * Check if we're done receiving this segment. If the receive
  160. * buffer is full but we expect more data, move on to the
  161. * next entry in the scatterlist.
  162. *
  163. * If the amount of data we received isn't a multiple of 4,
  164. * we will transparently receive the pad bytes, too.
  165. *
  166. * This function must be re-entrant.
  167. */
  168. static inline int
  169. iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
  170. {
  171. static unsigned char padbuf[ISCSI_PAD_LEN];
  172. struct scatterlist sg;
  173. unsigned int pad;
  174. debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
  175. segment->size, recv ? "recv" : "xmit");
  176. if (segment->hash && copied) {
  177. /*
178. * If a segment is kmap'd we must unmap it before sending
  179. * to the crypto layer since that will try to kmap it again.
  180. */
  181. iscsi_tcp_segment_unmap(segment);
  182. if (!segment->data) {
  183. sg_init_table(&sg, 1);
  184. sg_set_page(&sg, sg_page(segment->sg), copied,
  185. segment->copied + segment->sg_offset +
  186. segment->sg->offset);
  187. } else
  188. sg_init_one(&sg, segment->data + segment->copied,
  189. copied);
  190. crypto_hash_update(segment->hash, &sg, copied);
  191. }
  192. segment->copied += copied;
  193. if (segment->copied < segment->size) {
  194. iscsi_tcp_segment_map(segment, recv);
  195. return 0;
  196. }
  197. segment->total_copied += segment->copied;
  198. segment->copied = 0;
  199. segment->size = 0;
  200. /* Unmap the current scatterlist page, if there is one. */
  201. iscsi_tcp_segment_unmap(segment);
  202. /* Do we have more scatterlist entries? */
  203. debug_tcp("total copied %u total size %u\n", segment->total_copied,
  204. segment->total_size);
  205. if (segment->total_copied < segment->total_size) {
  206. /* Proceed to the next entry in the scatterlist. */
  207. iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
  208. 0);
  209. iscsi_tcp_segment_map(segment, recv);
  210. BUG_ON(segment->size == 0);
  211. return 0;
  212. }
  213. /* Do we need to handle padding? */
  214. pad = iscsi_padding(segment->total_copied);
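/* iscsi_padding() returns the bytes needed to reach the next 4-byte
 * boundary (e.g. 1 for a 1007-byte segment). The pad is received into,
 * or sent from, the static padbuf and, because segment->hash is still
 * set, it is folded into the data CRC like ordinary payload. */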
  215. if (pad != 0) {
  216. debug_tcp("consume %d pad bytes\n", pad);
  217. segment->total_size += pad;
  218. segment->size = pad;
  219. segment->data = padbuf;
  220. return 0;
  221. }
  222. /*
  223. * Set us up for transferring the data digest. hdr digest
  224. * is completely handled in hdr done function.
  225. */
  226. if (segment->hash) {
  227. crypto_hash_final(segment->hash, segment->digest);
  228. iscsi_tcp_segment_splice_digest(segment,
  229. recv ? segment->recv_digest : segment->digest);
  230. return 0;
  231. }
  232. return 1;
  233. }
  234. /**
  235. * iscsi_tcp_xmit_segment - transmit segment
  236. * @tcp_conn: the iSCSI TCP connection
237. * @segment: the buffer to transmit
  238. *
  239. * This function transmits as much of the buffer as
  240. * the network layer will accept, and returns the number of
  241. * bytes transmitted.
  242. *
  243. * If CRC hashing is enabled, the function will compute the
  244. * hash as it goes. When the entire segment has been transmitted,
  245. * it will retrieve the hash value and send it as well.
  246. */
  247. static int
  248. iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
  249. struct iscsi_segment *segment)
  250. {
  251. struct socket *sk = tcp_conn->sock;
  252. unsigned int copied = 0;
  253. int r = 0;
  254. while (!iscsi_tcp_segment_done(segment, 0, r)) {
  255. struct scatterlist *sg;
  256. unsigned int offset, copy;
  257. int flags = 0;
  258. r = 0;
  259. offset = segment->copied;
  260. copy = segment->size - offset;
  261. if (segment->total_copied + segment->size < segment->total_size)
  262. flags |= MSG_MORE;
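/* MSG_MORE lets TCP coalesce the pieces of one segment; it is dropped
 * only for the final chunk so the last write can be pushed out. */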
  263. /* Use sendpage if we can; else fall back to sendmsg */
  264. if (!segment->data) {
  265. sg = segment->sg;
  266. offset += segment->sg_offset + sg->offset;
  267. r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
  268. flags);
  269. } else {
  270. struct msghdr msg = { .msg_flags = flags };
  271. struct kvec iov = {
  272. .iov_base = segment->data + offset,
  273. .iov_len = copy
  274. };
  275. r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
  276. }
  277. if (r < 0) {
  278. iscsi_tcp_segment_unmap(segment);
  279. if (copied || r == -EAGAIN)
  280. break;
  281. return r;
  282. }
  283. copied += r;
  284. }
  285. return copied;
  286. }
  287. /**
  288. * iscsi_tcp_segment_recv - copy data to segment
  289. * @tcp_conn: the iSCSI TCP connection
  290. * @segment: the buffer to copy to
  291. * @ptr: data pointer
  292. * @len: amount of data available
  293. *
  294. * This function copies up to @len bytes to the
  295. * given buffer, and returns the number of bytes
  296. * consumed, which can actually be less than @len.
  297. *
  298. * If hash digest is enabled, the function will update the
  299. * hash while copying.
  300. * Combining these two operations doesn't buy us a lot (yet),
  301. * but in the future we could implement combined copy+crc,
302. * just the way we do for network layer checksums.
  303. */
  304. static int
  305. iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
  306. struct iscsi_segment *segment, const void *ptr,
  307. unsigned int len)
  308. {
  309. unsigned int copy = 0, copied = 0;
  310. while (!iscsi_tcp_segment_done(segment, 1, copy)) {
  311. if (copied == len) {
  312. debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
  313. len);
  314. break;
  315. }
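/* Copy the smaller of what the skb still offers and what the current
 * segment chunk still needs. */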
  316. copy = min(len - copied, segment->size - segment->copied);
  317. debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
  318. memcpy(segment->data + segment->copied, ptr + copied, copy);
  319. copied += copy;
  320. }
  321. return copied;
  322. }
  323. static inline void
  324. iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
  325. unsigned char digest[ISCSI_DIGEST_SIZE])
  326. {
  327. struct scatterlist sg;
  328. sg_init_one(&sg, hdr, hdrlen);
  329. crypto_hash_digest(hash, &sg, hdrlen, digest);
  330. }
  331. static inline int
  332. iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  333. struct iscsi_segment *segment)
  334. {
  335. if (!segment->digest_len)
  336. return 1;
  337. if (memcmp(segment->recv_digest, segment->digest,
  338. segment->digest_len)) {
  339. debug_scsi("digest mismatch\n");
  340. return 0;
  341. }
  342. return 1;
  343. }
  344. /*
  345. * Helper function to set up segment buffer
  346. */
  347. static inline void
  348. __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
  349. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  350. {
  351. memset(segment, 0, sizeof(*segment));
  352. segment->total_size = size;
  353. segment->done = done;
  354. if (hash) {
  355. segment->hash = hash;
  356. crypto_hash_init(hash);
  357. }
  358. }
  359. static inline void
  360. iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
  361. size_t size, iscsi_segment_done_fn_t *done,
  362. struct hash_desc *hash)
  363. {
  364. __iscsi_segment_init(segment, size, done, hash);
  365. segment->data = data;
  366. segment->size = size;
  367. }
  368. static inline int
  369. iscsi_segment_seek_sg(struct iscsi_segment *segment,
  370. struct scatterlist *sg_list, unsigned int sg_count,
  371. unsigned int offset, size_t size,
  372. iscsi_segment_done_fn_t *done, struct hash_desc *hash)
  373. {
  374. struct scatterlist *sg;
  375. unsigned int i;
  376. debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
  377. offset, size);
  378. __iscsi_segment_init(segment, size, done, hash);
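/* Walk the scatterlist until the requested byte offset falls inside an
 * entry and start the segment there; an offset past the end of the list
 * is a protocol error. */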
  379. for_each_sg(sg_list, sg, sg_count, i) {
  380. debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
  381. sg->offset);
  382. if (offset < sg->length) {
  383. iscsi_tcp_segment_init_sg(segment, sg, offset);
  384. return 0;
  385. }
  386. offset -= sg->length;
  387. }
  388. return ISCSI_ERR_DATA_OFFSET;
  389. }
  390. /**
  391. * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
  392. * @tcp_conn: iscsi connection to prep for
  393. *
  394. * This function always passes NULL for the hash argument, because when this
  395. * function is called we do not yet know the final size of the header and want
  396. * to delay the digest processing until we know that.
  397. */
  398. static void
  399. iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  400. {
  401. debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
  402. tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
  403. iscsi_segment_init_linear(&tcp_conn->in.segment,
  404. tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
  405. iscsi_tcp_hdr_recv_done, NULL);
  406. }
  407. /*
  408. * Handle incoming reply to any other type of command
  409. */
  410. static int
  411. iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
  412. struct iscsi_segment *segment)
  413. {
  414. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  415. int rc = 0;
  416. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  417. return ISCSI_ERR_DATA_DGST;
  418. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
  419. conn->data, tcp_conn->in.datalen);
  420. if (rc)
  421. return rc;
  422. iscsi_tcp_hdr_recv_prep(tcp_conn);
  423. return 0;
  424. }
  425. static void
  426. iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  427. {
  428. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  429. struct hash_desc *rx_hash = NULL;
  430. if (conn->datadgst_en)
  431. rx_hash = &tcp_conn->rx_hash;
  432. iscsi_segment_init_linear(&tcp_conn->in.segment,
  433. conn->data, tcp_conn->in.datalen,
  434. iscsi_tcp_data_recv_done, rx_hash);
  435. }
  436. /*
  437. * must be called with session lock
  438. */
  439. static void
  440. iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  441. {
  442. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  443. struct iscsi_r2t_info *r2t;
  444. /* flush ctask's r2t queues */
  445. while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
  446. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  447. sizeof(void*));
  448. debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
  449. }
  450. r2t = tcp_ctask->r2t;
  451. if (r2t != NULL) {
  452. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  453. sizeof(void*));
  454. tcp_ctask->r2t = NULL;
  455. }
  456. }
  457. /**
  458. * iscsi_data_rsp - SCSI Data-In Response processing
  459. * @conn: iscsi connection
  460. * @ctask: scsi command task
  461. **/
  462. static int
  463. iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  464. {
  465. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  466. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  467. struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
  468. struct iscsi_session *session = conn->session;
  469. struct scsi_cmnd *sc = ctask->sc;
  470. int datasn = be32_to_cpu(rhdr->datasn);
  471. iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
  472. if (tcp_conn->in.datalen == 0)
  473. return 0;
  474. if (tcp_ctask->exp_datasn != datasn) {
  475. debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
  476. __FUNCTION__, tcp_ctask->exp_datasn, datasn);
  477. return ISCSI_ERR_DATASN;
  478. }
  479. tcp_ctask->exp_datasn++;
  480. tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
  481. if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
  482. debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
  483. __FUNCTION__, tcp_ctask->data_offset,
  484. tcp_conn->in.datalen, scsi_bufflen(sc));
  485. return ISCSI_ERR_DATA_OFFSET;
  486. }
  487. if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
  488. sc->result = (DID_OK << 16) | rhdr->cmd_status;
  489. conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
  490. if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
  491. ISCSI_FLAG_DATA_OVERFLOW)) {
  492. int res_count = be32_to_cpu(rhdr->residual_count);
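/* Only honour a residual that makes sense: positive and, unless the
 * overflow flag is set, no larger than the buffer. Anything else marks
 * the target as misbehaving (DID_BAD_TARGET). */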
  493. if (res_count > 0 &&
  494. (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
  495. res_count <= scsi_bufflen(sc)))
  496. scsi_set_resid(sc, res_count);
  497. else
  498. sc->result = (DID_BAD_TARGET << 16) |
  499. rhdr->cmd_status;
  500. }
  501. }
  502. conn->datain_pdus_cnt++;
  503. return 0;
  504. }
  505. /**
  506. * iscsi_solicit_data_init - initialize first Data-Out
  507. * @conn: iscsi connection
  508. * @ctask: scsi command task
  509. * @r2t: R2T info
  510. *
  511. * Notes:
512. * Initialize the first Data-Out within this R2T sequence and find
  513. * proper data_offset within this SCSI command.
  514. *
  515. * This function is called with connection lock taken.
  516. **/
  517. static void
  518. iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
  519. struct iscsi_r2t_info *r2t)
  520. {
  521. struct iscsi_data *hdr;
  522. hdr = &r2t->dtask.hdr;
  523. memset(hdr, 0, sizeof(struct iscsi_data));
  524. hdr->ttt = r2t->ttt;
  525. hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
  526. r2t->solicit_datasn++;
  527. hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
  528. memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
  529. hdr->itt = ctask->hdr->itt;
  530. hdr->exp_statsn = r2t->exp_statsn;
  531. hdr->offset = cpu_to_be32(r2t->data_offset);
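/* Each Data-Out carries at most max_xmit_dlength bytes (the target's
 * MaxRecvDataSegmentLength); the F (final) bit is set here only if this
 * single Data-Out already covers the whole R2T. */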
  532. if (r2t->data_length > conn->max_xmit_dlength) {
  533. hton24(hdr->dlength, conn->max_xmit_dlength);
  534. r2t->data_count = conn->max_xmit_dlength;
  535. hdr->flags = 0;
  536. } else {
  537. hton24(hdr->dlength, r2t->data_length);
  538. r2t->data_count = r2t->data_length;
  539. hdr->flags = ISCSI_FLAG_CMD_FINAL;
  540. }
  541. conn->dataout_pdus_cnt++;
  542. r2t->sent = 0;
  543. }
  544. /**
  545. * iscsi_r2t_rsp - iSCSI R2T Response processing
  546. * @conn: iscsi connection
  547. * @ctask: scsi command task
  548. **/
  549. static int
  550. iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  551. {
  552. struct iscsi_r2t_info *r2t;
  553. struct iscsi_session *session = conn->session;
  554. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  555. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  556. struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
  557. int r2tsn = be32_to_cpu(rhdr->r2tsn);
  558. int rc;
  559. if (tcp_conn->in.datalen) {
560. printk(KERN_ERR "iscsi_tcp: invalid R2T with datalen %d\n",
  561. tcp_conn->in.datalen);
  562. return ISCSI_ERR_DATALEN;
  563. }
  564. if (tcp_ctask->exp_datasn != r2tsn){
  565. debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
  566. __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
  567. return ISCSI_ERR_R2TSN;
  568. }
  569. /* fill-in new R2T associated with the task */
  570. spin_lock(&session->lock);
  571. iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
  572. if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
  573. printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
  574. "recovery...\n", ctask->itt);
  575. spin_unlock(&session->lock);
  576. return 0;
  577. }
  578. rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
  579. BUG_ON(!rc);
  580. r2t->exp_statsn = rhdr->statsn;
  581. r2t->data_length = be32_to_cpu(rhdr->data_length);
  582. if (r2t->data_length == 0) {
  583. printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
  584. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  585. sizeof(void*));
  586. spin_unlock(&session->lock);
  587. return ISCSI_ERR_DATALEN;
  588. }
  589. if (r2t->data_length > session->max_burst)
  590. debug_scsi("invalid R2T with data len %u and max burst %u."
  591. "Attempting to execute request.\n",
  592. r2t->data_length, session->max_burst);
  593. r2t->data_offset = be32_to_cpu(rhdr->data_offset);
  594. if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
  595. printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
  596. "offset %u and total length %d\n", r2t->data_length,
  597. r2t->data_offset, scsi_bufflen(ctask->sc));
  598. __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
  599. sizeof(void*));
  600. spin_unlock(&session->lock);
  601. return ISCSI_ERR_DATALEN;
  602. }
  603. r2t->ttt = rhdr->ttt; /* no flip */
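/* The Target Transfer Tag is opaque to the initiator and is echoed back
 * exactly as received, which is why it is never byte-swapped. */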
  604. r2t->solicit_datasn = 0;
  605. iscsi_solicit_data_init(conn, ctask, r2t);
  606. tcp_ctask->exp_datasn = r2tsn + 1;
  607. __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
  608. conn->r2t_pdus_cnt++;
  609. iscsi_requeue_ctask(ctask);
  610. spin_unlock(&session->lock);
  611. return 0;
  612. }
  613. /*
  614. * Handle incoming reply to DataIn command
  615. */
  616. static int
  617. iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
  618. struct iscsi_segment *segment)
  619. {
  620. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  621. struct iscsi_hdr *hdr = tcp_conn->in.hdr;
  622. int rc;
  623. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  624. return ISCSI_ERR_DATA_DGST;
  625. /* check for non-exceptional status */
  626. if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
  627. rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
  628. if (rc)
  629. return rc;
  630. }
  631. iscsi_tcp_hdr_recv_prep(tcp_conn);
  632. return 0;
  633. }
  634. /**
  635. * iscsi_tcp_hdr_dissect - process PDU header
  636. * @conn: iSCSI connection
  637. * @hdr: PDU header
  638. *
  639. * This function analyzes the header of the PDU received,
  640. * and performs several sanity checks. If the PDU is accompanied
  641. * by data, the receive buffer is set up to copy the incoming data
  642. * to the correct location.
  643. */
  644. static int
  645. iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
  646. {
  647. int rc = 0, opcode, ahslen;
  648. struct iscsi_session *session = conn->session;
  649. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  650. struct iscsi_cmd_task *ctask;
  651. uint32_t itt;
  652. /* verify PDU length */
  653. tcp_conn->in.datalen = ntoh24(hdr->dlength);
  654. if (tcp_conn->in.datalen > conn->max_recv_dlength) {
  655. printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
  656. tcp_conn->in.datalen, conn->max_recv_dlength);
  657. return ISCSI_ERR_DATALEN;
  658. }
  659. /* Additional header segments. So far, we don't
  660. * process additional headers.
  661. */
  662. ahslen = hdr->hlength << 2;
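/* TotalAHSLength is expressed in four-byte words, hence the shift. */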
  663. opcode = hdr->opcode & ISCSI_OPCODE_MASK;
  664. /* verify itt (itt encoding: age+cid+itt) */
  665. rc = iscsi_verify_itt(conn, hdr, &itt);
  666. if (rc)
  667. return rc;
  668. debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
  669. opcode, ahslen, tcp_conn->in.datalen);
  670. switch(opcode) {
  671. case ISCSI_OP_SCSI_DATA_IN:
  672. ctask = session->cmds[itt];
  673. rc = iscsi_data_rsp(conn, ctask);
  674. if (rc)
  675. return rc;
  676. if (tcp_conn->in.datalen) {
  677. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  678. struct hash_desc *rx_hash = NULL;
  679. /*
  680. * Setup copy of Data-In into the Scsi_Cmnd
  681. * Scatterlist case:
  682. * We set up the iscsi_segment to point to the next
  683. * scatterlist entry to copy to. As we go along,
  684. * we move on to the next scatterlist entry and
  685. * update the digest per-entry.
  686. */
  687. if (conn->datadgst_en)
  688. rx_hash = &tcp_conn->rx_hash;
  689. debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
  690. "datalen=%d)\n", tcp_conn,
  691. tcp_ctask->data_offset,
  692. tcp_conn->in.datalen);
  693. return iscsi_segment_seek_sg(&tcp_conn->in.segment,
  694. scsi_sglist(ctask->sc),
  695. scsi_sg_count(ctask->sc),
  696. tcp_ctask->data_offset,
  697. tcp_conn->in.datalen,
  698. iscsi_tcp_process_data_in,
  699. rx_hash);
  700. }
  701. /* fall through */
  702. case ISCSI_OP_SCSI_CMD_RSP:
  703. if (tcp_conn->in.datalen) {
  704. iscsi_tcp_data_recv_prep(tcp_conn);
  705. return 0;
  706. }
  707. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  708. break;
  709. case ISCSI_OP_R2T:
  710. ctask = session->cmds[itt];
  711. if (ahslen)
  712. rc = ISCSI_ERR_AHSLEN;
  713. else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
  714. rc = iscsi_r2t_rsp(conn, ctask);
  715. else
  716. rc = ISCSI_ERR_PROTO;
  717. break;
  718. case ISCSI_OP_LOGIN_RSP:
  719. case ISCSI_OP_TEXT_RSP:
  720. case ISCSI_OP_REJECT:
  721. case ISCSI_OP_ASYNC_EVENT:
  722. /*
  723. * It is possible that we could get a PDU with a buffer larger
  724. * than 8K, but there are no targets that currently do this.
  725. * For now we fail until we find a vendor that needs it
  726. */
  727. if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
  728. printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
  729. "but conn buffer is only %u (opcode %0x)\n",
  730. tcp_conn->in.datalen,
  731. ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
  732. rc = ISCSI_ERR_PROTO;
  733. break;
  734. }
  735. /* If there's data coming in with the response,
  736. * receive it to the connection's buffer.
  737. */
  738. if (tcp_conn->in.datalen) {
  739. iscsi_tcp_data_recv_prep(tcp_conn);
  740. return 0;
  741. }
  742. /* fall through */
  743. case ISCSI_OP_LOGOUT_RSP:
  744. case ISCSI_OP_NOOP_IN:
  745. case ISCSI_OP_SCSI_TMFUNC_RSP:
  746. rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
  747. break;
  748. default:
  749. rc = ISCSI_ERR_BAD_OPCODE;
  750. break;
  751. }
  752. if (rc == 0) {
  753. /* Anything that comes with data should have
  754. * been handled above. */
  755. if (tcp_conn->in.datalen)
  756. return ISCSI_ERR_PROTO;
  757. iscsi_tcp_hdr_recv_prep(tcp_conn);
  758. }
  759. return rc;
  760. }
  761. /**
  762. * iscsi_tcp_hdr_recv_done - process PDU header
  763. *
  764. * This is the callback invoked when the PDU header has
  765. * been received. If the header is followed by additional
  766. * header segments, we go back for more data.
  767. */
  768. static int
  769. iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
  770. struct iscsi_segment *segment)
  771. {
  772. struct iscsi_conn *conn = tcp_conn->iscsi_conn;
  773. struct iscsi_hdr *hdr;
  774. /* Check if there are additional header segments
  775. * *prior* to computing the digest, because we
  776. * may need to go back to the caller for more.
  777. */
  778. hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
  779. if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
  780. /* Bump the header length - the caller will
  781. * just loop around and get the AHS for us, and
  782. * call again. */
  783. unsigned int ahslen = hdr->hlength << 2;
  784. /* Make sure we don't overflow */
  785. if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
  786. return ISCSI_ERR_AHSLEN;
  787. segment->total_size += ahslen;
  788. segment->size += ahslen;
  789. return 0;
  790. }
  791. /* We're done processing the header. See if we're doing
  792. * header digests; if so, set up the recv_digest buffer
  793. * and go back for more. */
  794. if (conn->hdrdgst_en) {
  795. if (segment->digest_len == 0) {
  796. iscsi_tcp_segment_splice_digest(segment,
  797. segment->recv_digest);
  798. return 0;
  799. }
  800. iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
  801. segment->total_copied - ISCSI_DIGEST_SIZE,
  802. segment->digest);
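/* The received digest occupies the last ISCSI_DIGEST_SIZE bytes of the
 * segment, so the CRC is computed over everything before it (the basic
 * header plus any AHS) and then compared against it below. */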
  803. if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
  804. return ISCSI_ERR_HDR_DGST;
  805. }
  806. tcp_conn->in.hdr = hdr;
  807. return iscsi_tcp_hdr_dissect(conn, hdr);
  808. }
  809. /**
  810. * iscsi_tcp_recv - TCP receive in sendfile fashion
  811. * @rd_desc: read descriptor
  812. * @skb: socket buffer
  813. * @offset: offset in skb
  814. * @len: skb->len - offset
  815. **/
  816. static int
  817. iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  818. unsigned int offset, size_t len)
  819. {
  820. struct iscsi_conn *conn = rd_desc->arg.data;
  821. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  822. struct iscsi_segment *segment = &tcp_conn->in.segment;
  823. struct skb_seq_state seq;
  824. unsigned int consumed = 0;
  825. int rc = 0;
  826. debug_tcp("in %d bytes\n", skb->len - offset);
  827. if (unlikely(conn->suspend_rx)) {
  828. debug_tcp("conn %d Rx suspended!\n", conn->id);
  829. return 0;
  830. }
  831. skb_prepare_seq_read(skb, offset, skb->len, &seq);
  832. while (1) {
  833. unsigned int avail;
  834. const u8 *ptr;
  835. avail = skb_seq_read(consumed, &ptr, &seq);
  836. if (avail == 0) {
  837. debug_tcp("no more data avail. Consumed %d\n",
  838. consumed);
  839. break;
  840. }
  841. BUG_ON(segment->copied >= segment->size);
  842. debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
  843. rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
  844. BUG_ON(rc == 0);
  845. consumed += rc;
  846. if (segment->total_copied >= segment->total_size) {
  847. debug_tcp("segment done\n");
  848. rc = segment->done(tcp_conn, segment);
  849. if (rc != 0) {
  850. skb_abort_seq_read(&seq);
  851. goto error;
  852. }
853. /* The done() function sets up the
  854. * next segment. */
  855. }
  856. }
  857. skb_abort_seq_read(&seq);
  858. conn->rxdata_octets += consumed;
  859. return consumed;
  860. error:
  861. debug_tcp("Error receiving PDU, errno=%d\n", rc);
  862. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  863. return 0;
  864. }
  865. static void
  866. iscsi_tcp_data_ready(struct sock *sk, int flag)
  867. {
  868. struct iscsi_conn *conn = sk->sk_user_data;
  869. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  870. read_descriptor_t rd_desc;
  871. read_lock(&sk->sk_callback_lock);
  872. /*
  873. * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
  874. * We set count to 1 because we want the network layer to
  875. * hand us all the skbs that are available. iscsi_tcp_recv
876. * handles pdus that cross buffers or pdus that still need data.
  877. */
  878. rd_desc.arg.data = conn;
  879. rd_desc.count = 1;
  880. tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
  881. read_unlock(&sk->sk_callback_lock);
  882. /* If we had to (atomically) map a highmem page,
  883. * unmap it now. */
  884. iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
  885. }
  886. static void
  887. iscsi_tcp_state_change(struct sock *sk)
  888. {
  889. struct iscsi_tcp_conn *tcp_conn;
  890. struct iscsi_conn *conn;
  891. struct iscsi_session *session;
  892. void (*old_state_change)(struct sock *);
  893. read_lock(&sk->sk_callback_lock);
  894. conn = (struct iscsi_conn*)sk->sk_user_data;
  895. session = conn->session;
  896. if ((sk->sk_state == TCP_CLOSE_WAIT ||
  897. sk->sk_state == TCP_CLOSE) &&
  898. !atomic_read(&sk->sk_rmem_alloc)) {
  899. debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
  900. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  901. }
  902. tcp_conn = conn->dd_data;
  903. old_state_change = tcp_conn->old_state_change;
  904. read_unlock(&sk->sk_callback_lock);
  905. old_state_change(sk);
  906. }
  907. /**
  908. * iscsi_write_space - Called when more output buffer space is available
  909. * @sk: socket space is available for
  910. **/
  911. static void
  912. iscsi_write_space(struct sock *sk)
  913. {
  914. struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
  915. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  916. tcp_conn->old_write_space(sk);
  917. debug_tcp("iscsi_write_space: cid %d\n", conn->id);
  918. scsi_queue_work(conn->session->host, &conn->xmitwork);
  919. }
  920. static void
  921. iscsi_conn_set_callbacks(struct iscsi_conn *conn)
  922. {
  923. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  924. struct sock *sk = tcp_conn->sock->sk;
  925. /* assign new callbacks */
  926. write_lock_bh(&sk->sk_callback_lock);
  927. sk->sk_user_data = conn;
  928. tcp_conn->old_data_ready = sk->sk_data_ready;
  929. tcp_conn->old_state_change = sk->sk_state_change;
  930. tcp_conn->old_write_space = sk->sk_write_space;
  931. sk->sk_data_ready = iscsi_tcp_data_ready;
  932. sk->sk_state_change = iscsi_tcp_state_change;
  933. sk->sk_write_space = iscsi_write_space;
  934. write_unlock_bh(&sk->sk_callback_lock);
  935. }
  936. static void
  937. iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
  938. {
  939. struct sock *sk = tcp_conn->sock->sk;
  940. /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
  941. write_lock_bh(&sk->sk_callback_lock);
  942. sk->sk_user_data = NULL;
  943. sk->sk_data_ready = tcp_conn->old_data_ready;
  944. sk->sk_state_change = tcp_conn->old_state_change;
  945. sk->sk_write_space = tcp_conn->old_write_space;
  946. sk->sk_no_check = 0;
  947. write_unlock_bh(&sk->sk_callback_lock);
  948. }
  949. /**
  950. * iscsi_xmit - TCP transmit
  951. **/
  952. static int
  953. iscsi_xmit(struct iscsi_conn *conn)
  954. {
  955. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  956. struct iscsi_segment *segment = &tcp_conn->out.segment;
  957. unsigned int consumed = 0;
  958. int rc = 0;
  959. while (1) {
  960. rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
  961. if (rc < 0)
  962. goto error;
  963. if (rc == 0)
  964. break;
  965. consumed += rc;
  966. if (segment->total_copied >= segment->total_size) {
  967. if (segment->done != NULL) {
  968. rc = segment->done(tcp_conn, segment);
  969. if (rc < 0)
  970. goto error;
  971. }
  972. }
  973. }
  974. debug_tcp("xmit %d bytes\n", consumed);
  975. conn->txdata_octets += consumed;
  976. return consumed;
  977. error:
  978. /* Transmit error. We could initiate error recovery
  979. * here. */
  980. debug_tcp("Error sending PDU, errno=%d\n", rc);
  981. iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
  982. return rc;
  983. }
  984. /**
  985. * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
  986. */
  987. static inline int
  988. iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
  989. {
  990. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  991. struct iscsi_segment *segment = &tcp_conn->out.segment;
  992. return segment->total_copied - segment->total_size;
  993. }
  994. static inline int
  995. iscsi_tcp_flush(struct iscsi_conn *conn)
  996. {
  997. int rc;
  998. while (iscsi_tcp_xmit_qlen(conn)) {
  999. rc = iscsi_xmit(conn);
  1000. if (rc == 0)
  1001. return -EAGAIN;
  1002. if (rc < 0)
  1003. return rc;
  1004. }
  1005. return 0;
  1006. }
  1007. /*
  1008. * This is called when we're done sending the header.
  1009. * Simply copy the data_segment to the send segment, and return.
  1010. */
  1011. static int
  1012. iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
  1013. struct iscsi_segment *segment)
  1014. {
  1015. tcp_conn->out.segment = tcp_conn->out.data_segment;
  1016. debug_tcp("Header done. Next segment size %u total_size %u\n",
  1017. tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
  1018. return 0;
  1019. }
  1020. static void
  1021. iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
  1022. {
  1023. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1024. debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
  1025. conn->hdrdgst_en? ", digest enabled" : "");
  1026. /* Clear the data segment - needs to be filled in by the
  1027. * caller using iscsi_tcp_send_data_prep() */
  1028. memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
  1029. /* If header digest is enabled, compute the CRC and
  1030. * place the digest into the same buffer. We make
  1031. * sure that both iscsi_tcp_ctask and mtask have
  1032. * sufficient room.
  1033. */
  1034. if (conn->hdrdgst_en) {
  1035. iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
  1036. hdr + hdrlen);
  1037. hdrlen += ISCSI_DIGEST_SIZE;
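/* The CRC32C header digest was written directly after the header in the
 * same buffer, so it goes out as part of this one linear segment. */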
  1038. }
  1039. /* Remember header pointer for later, when we need
  1040. * to decide whether there's a payload to go along
  1041. * with the header. */
  1042. tcp_conn->out.hdr = hdr;
  1043. iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
  1044. iscsi_tcp_send_hdr_done, NULL);
  1045. }
  1046. /*
  1047. * Prepare the send buffer for the payload data.
  1048. * Padding and checksumming will all be taken care
  1049. * of by the iscsi_segment routines.
  1050. */
  1051. static int
  1052. iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
  1053. unsigned int count, unsigned int offset,
  1054. unsigned int len)
  1055. {
  1056. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1057. struct hash_desc *tx_hash = NULL;
  1058. unsigned int hdr_spec_len;
  1059. debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
  1060. tcp_conn, offset, len,
  1061. conn->datadgst_en? ", digest enabled" : "");
  1062. /* Make sure the datalen matches what the caller
  1063. said he would send. */
  1064. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1065. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1066. if (conn->datadgst_en)
  1067. tx_hash = &tcp_conn->tx_hash;
  1068. return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
  1069. sg, count, offset, len,
  1070. NULL, tx_hash);
  1071. }
  1072. static void
  1073. iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  1074. size_t len)
  1075. {
  1076. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1077. struct hash_desc *tx_hash = NULL;
  1078. unsigned int hdr_spec_len;
  1079. debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
  1080. conn->datadgst_en? ", digest enabled" : "");
  1081. /* Make sure the datalen matches what the caller
  1082. said he would send. */
  1083. hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
  1084. WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
  1085. if (conn->datadgst_en)
  1086. tx_hash = &tcp_conn->tx_hash;
  1087. iscsi_segment_init_linear(&tcp_conn->out.data_segment,
  1088. data, len, NULL, tx_hash);
  1089. }
  1090. /**
  1091. * iscsi_solicit_data_cont - initialize next Data-Out
  1092. * @conn: iscsi connection
  1093. * @ctask: scsi command task
  1094. * @r2t: R2T info
  1095. * @left: bytes left to transfer
  1096. *
  1097. * Notes:
  1098. * Initialize next Data-Out within this R2T sequence and continue
1099. * to process the next Scatter-Gather element (if any) of this SCSI command.
  1100. *
  1101. * Called under connection lock.
  1102. **/
  1103. static int
  1104. iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
  1105. struct iscsi_r2t_info *r2t)
  1106. {
  1107. struct iscsi_data *hdr;
  1108. int new_offset, left;
  1109. BUG_ON(r2t->data_length - r2t->sent < 0);
  1110. left = r2t->data_length - r2t->sent;
  1111. if (left == 0)
  1112. return 0;
  1113. hdr = &r2t->dtask.hdr;
  1114. memset(hdr, 0, sizeof(struct iscsi_data));
  1115. hdr->ttt = r2t->ttt;
  1116. hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
  1117. r2t->solicit_datasn++;
  1118. hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
  1119. memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
  1120. hdr->itt = ctask->hdr->itt;
  1121. hdr->exp_statsn = r2t->exp_statsn;
  1122. new_offset = r2t->data_offset + r2t->sent;
  1123. hdr->offset = cpu_to_be32(new_offset);
  1124. if (left > conn->max_xmit_dlength) {
  1125. hton24(hdr->dlength, conn->max_xmit_dlength);
  1126. r2t->data_count = conn->max_xmit_dlength;
  1127. } else {
  1128. hton24(hdr->dlength, left);
  1129. r2t->data_count = left;
  1130. hdr->flags = ISCSI_FLAG_CMD_FINAL;
  1131. }
  1132. conn->dataout_pdus_cnt++;
  1133. return 1;
  1134. }
  1135. /**
1136. * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  1137. * @conn: iscsi connection
  1138. * @ctask: scsi command task
  1139. * @sc: scsi command
  1140. **/
  1141. static int
  1142. iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
  1143. {
  1144. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  1145. struct iscsi_conn *conn = ctask->conn;
  1146. struct scsi_cmnd *sc = ctask->sc;
  1147. int err;
  1148. BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
  1149. tcp_ctask->sent = 0;
  1150. tcp_ctask->exp_datasn = 0;
  1151. /* Prepare PDU, optionally w/ immediate data */
  1152. debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
  1153. conn->id, ctask->itt, ctask->imm_count,
  1154. ctask->unsol_count);
  1155. iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
  1156. if (!ctask->imm_count)
  1157. return 0;
  1158. /* If we have immediate data, attach a payload */
  1159. err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
  1160. 0, ctask->imm_count);
  1161. if (err)
  1162. return err;
  1163. tcp_ctask->sent += ctask->imm_count;
  1164. ctask->imm_count = 0;
  1165. return 0;
  1166. }
  1167. /**
1168. * iscsi_tcp_mtask_xmit - xmit management (immediate) task
  1169. * @conn: iscsi connection
  1170. * @mtask: task management task
  1171. *
  1172. * Notes:
  1173. * The function can return -EAGAIN in which case caller must
  1174. * call it again later, or recover. '0' return code means successful
  1175. * xmit.
  1176. **/
  1177. static int
  1178. iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
  1179. {
  1180. int rc;
  1181. /* Flush any pending data first. */
  1182. rc = iscsi_tcp_flush(conn);
  1183. if (rc < 0)
  1184. return rc;
  1185. if (mtask->hdr->itt == RESERVED_ITT) {
  1186. struct iscsi_session *session = conn->session;
  1187. spin_lock_bh(&session->lock);
  1188. iscsi_free_mgmt_task(conn, mtask);
  1189. spin_unlock_bh(&session->lock);
  1190. }
  1191. return 0;
  1192. }
  1193. /*
  1194. * iscsi_tcp_ctask_xmit - xmit normal PDU task
  1195. * @conn: iscsi connection
  1196. * @ctask: iscsi command task
  1197. *
1198. * We're expected to return 0 when everything was transmitted successfully,
  1199. * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  1200. * of error.
  1201. */
  1202. static int
  1203. iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  1204. {
  1205. struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
  1206. struct scsi_cmnd *sc = ctask->sc;
  1207. int rc = 0;
  1208. flush:
  1209. /* Flush any pending data first. */
  1210. rc = iscsi_tcp_flush(conn);
  1211. if (rc < 0)
  1212. return rc;
  1213. /* Are we done already? */
  1214. if (sc->sc_data_direction != DMA_TO_DEVICE)
  1215. return 0;
  1216. if (ctask->unsol_count != 0) {
  1217. struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
  1218. /* Prepare a header for the unsolicited PDU.
  1219. * The amount of data we want to send will be
  1220. * in ctask->data_count.
  1221. * FIXME: return the data count instead.
  1222. */
  1223. iscsi_prep_unsolicit_data_pdu(ctask, hdr);
  1224. debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
  1225. ctask->itt, tcp_ctask->sent, ctask->data_count);
  1226. iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
  1227. rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
  1228. scsi_sg_count(sc),
  1229. tcp_ctask->sent,
  1230. ctask->data_count);
  1231. if (rc)
  1232. goto fail;
  1233. tcp_ctask->sent += ctask->data_count;
  1234. ctask->unsol_count -= ctask->data_count;
  1235. goto flush;
  1236. } else {
  1237. struct iscsi_session *session = conn->session;
  1238. struct iscsi_r2t_info *r2t;
  1239. /* All unsolicited PDUs sent. Check for solicited PDUs.
  1240. */
  1241. spin_lock_bh(&session->lock);
  1242. r2t = tcp_ctask->r2t;
  1243. if (r2t != NULL) {
  1244. /* Continue with this R2T? */
  1245. if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
  1246. debug_scsi(" done with r2t %p\n", r2t);
  1247. __kfifo_put(tcp_ctask->r2tpool.queue,
  1248. (void*)&r2t, sizeof(void*));
  1249. tcp_ctask->r2t = r2t = NULL;
  1250. }
  1251. }
  1252. if (r2t == NULL) {
  1253. __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
  1254. sizeof(void*));
  1255. r2t = tcp_ctask->r2t;
  1256. }
  1257. spin_unlock_bh(&session->lock);
  1258. /* Waiting for more R2Ts to arrive. */
  1259. if (r2t == NULL) {
  1260. debug_tcp("no R2Ts yet\n");
  1261. return 0;
  1262. }
  1263. debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
  1264. r2t, r2t->solicit_datasn - 1, ctask->itt,
  1265. r2t->data_offset + r2t->sent, r2t->data_count);
  1266. iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
  1267. sizeof(struct iscsi_hdr));
  1268. rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
  1269. scsi_sg_count(sc),
  1270. r2t->data_offset + r2t->sent,
  1271. r2t->data_count);
  1272. if (rc)
  1273. goto fail;
  1274. tcp_ctask->sent += r2t->data_count;
  1275. r2t->sent += r2t->data_count;
  1276. goto flush;
  1277. }
  1278. return 0;
  1279. fail:
  1280. iscsi_conn_failure(conn, rc);
  1281. return -EIO;
  1282. }
  1283. static struct iscsi_cls_conn *
  1284. iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
  1285. {
  1286. struct iscsi_conn *conn;
  1287. struct iscsi_cls_conn *cls_conn;
  1288. struct iscsi_tcp_conn *tcp_conn;
  1289. cls_conn = iscsi_conn_setup(cls_session, conn_idx);
  1290. if (!cls_conn)
  1291. return NULL;
  1292. conn = cls_conn->dd_data;
  1293. /*
  1294. * due to strange issues with iser these are not set
  1295. * in iscsi_conn_setup
  1296. */
  1297. conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
  1298. tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
  1299. if (!tcp_conn)
  1300. goto tcp_conn_alloc_fail;
  1301. conn->dd_data = tcp_conn;
  1302. tcp_conn->iscsi_conn = conn;
  1303. tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1304. CRYPTO_ALG_ASYNC);
  1305. tcp_conn->tx_hash.flags = 0;
  1306. if (IS_ERR(tcp_conn->tx_hash.tfm)) {
  1307. printk(KERN_ERR "Could not create connection due to crc32c "
  1308. "loading error %ld. Make sure the crc32c module is "
  1309. "built as a module or into the kernel\n",
  1310. PTR_ERR(tcp_conn->tx_hash.tfm));
  1311. goto free_tcp_conn;
  1312. }
  1313. tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
  1314. CRYPTO_ALG_ASYNC);
  1315. tcp_conn->rx_hash.flags = 0;
  1316. if (IS_ERR(tcp_conn->rx_hash.tfm)) {
  1317. printk(KERN_ERR "Could not create connection due to crc32c "
  1318. "loading error %ld. Make sure the crc32c module is "
  1319. "built as a module or into the kernel\n",
  1320. PTR_ERR(tcp_conn->rx_hash.tfm));
  1321. goto free_tx_tfm;
  1322. }
  1323. return cls_conn;
  1324. free_tx_tfm:
  1325. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1326. free_tcp_conn:
  1327. kfree(tcp_conn);
  1328. tcp_conn_alloc_fail:
  1329. iscsi_conn_teardown(cls_conn);
  1330. return NULL;
  1331. }
  1332. static void
  1333. iscsi_tcp_release_conn(struct iscsi_conn *conn)
  1334. {
  1335. struct iscsi_session *session = conn->session;
  1336. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1337. struct socket *sock = tcp_conn->sock;
  1338. if (!sock)
  1339. return;
  1340. sock_hold(sock->sk);
  1341. iscsi_conn_restore_callbacks(tcp_conn);
  1342. sock_put(sock->sk);
  1343. spin_lock_bh(&session->lock);
  1344. tcp_conn->sock = NULL;
  1345. conn->recv_lock = NULL;
  1346. spin_unlock_bh(&session->lock);
  1347. sockfd_put(sock);
  1348. }
  1349. static void
  1350. iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
  1351. {
  1352. struct iscsi_conn *conn = cls_conn->dd_data;
  1353. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  1354. iscsi_tcp_release_conn(conn);
  1355. iscsi_conn_teardown(cls_conn);
  1356. if (tcp_conn->tx_hash.tfm)
  1357. crypto_free_hash(tcp_conn->tx_hash.tfm);
  1358. if (tcp_conn->rx_hash.tfm)
  1359. crypto_free_hash(tcp_conn->rx_hash.tfm);
  1360. kfree(tcp_conn);
  1361. }
  1362. static void
  1363. iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
  1364. {
  1365. struct iscsi_conn *conn = cls_conn->dd_data;
  1366. iscsi_conn_stop(cls_conn, flag);
  1367. iscsi_tcp_release_conn(conn);
  1368. }
static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
			      char *buf, int *port,
			      int (*getname)(struct socket *, struct sockaddr *,
					     int *addrlen))
{
	struct sockaddr_storage *addr;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int rc = 0, len;

	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (getname(sock, (struct sockaddr *) addr, &len)) {
		rc = -ENODEV;
		goto free_addr;
	}

	switch (addr->ss_family) {
	case AF_INET:
		sin = (struct sockaddr_in *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
		*port = be16_to_cpu(sin->sin_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)addr;
		spin_lock_bh(&conn->session->lock);
		sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
		*port = be16_to_cpu(sin6->sin6_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	}

free_addr:
	kfree(addr);
	return rc;
}
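
/*
 * iscsi_tcp_conn_bind - bind a userspace-created TCP socket to a connection
 *
 * Looks up the socket from the file descriptor passed in transport_eph,
 * records the peer and local addresses for later queries, tunes a few
 * socket parameters, and hooks the TCP callbacks so received PDUs are fed
 * into the receive state machine.
 */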
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}
	/*
	 * copy these values now because if we drop the session
	 * userspace may still want to query the values since we will
	 * be using them for the reconnect
	 */
	err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
				 &conn->portal_port, kernel_getpeername);
	if (err)
		goto free_socket;

	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
				 &conn->local_port, kernel_getsockname);
	if (err)
		goto free_socket;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	/* FIXME: disable Nagle's algorithm */

	/*
	 * Intercept TCP callbacks for sendfile like receive
	 * processing.
	 */
	conn->recv_lock = &sk->sk_callback_lock;
	iscsi_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

/* called with host lock */
static void
iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);

	/* Prepare PDU, optionally w/ immediate data */
	iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));

	/* If we have immediate data, attach a payload */
	if (mtask->data_count)
		iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
						   mtask->data_count);
}
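
/*
 * iscsi_r2tpool_alloc - allocate per-task R2T pools and xmit queues
 *
 * Called at session creation and when MaxOutstandingR2T changes; on
 * failure, all previously initialized tasks are cleaned up.
 */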
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		/*
		 * pre-allocate 4x as many r2ts to handle the race where the
		 * target acks DataOut faster than our data_xmit() could
		 * replenish the r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		tcp_ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_ctask->r2tpool);
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool);
	}
	return -ENOMEM;
}

static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool);
	}
}
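
/*
 * iscsi_conn_set_param - handle TCP-specific side effects of param changes
 *
 * Most parameters are passed straight to iscsi_set_param(); data-digest
 * changes switch between sendpage and sock_no_sendpage, and
 * MaxOutstandingR2T changes force the R2T pools to be reallocated.
 */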
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}
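
/*
 * iscsi_tcp_conn_get_param - report the portal address and port recorded
 * at bind time; everything else is handled by the generic code.
 */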
static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%hu\n", conn->portal_port);
		spin_unlock_bh(&conn->session->lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		len = sprintf(buf, "%s\n", conn->portal_address);
		spin_unlock_bh(&conn->session->lock);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}
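
/*
 * iscsi_tcp_host_get_param - report the local IP address of the leading
 * connection; other host parameters fall through to iscsi_host_get_param().
 */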
static int
iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		if (!session->leadconn)
			len = -ENODEV;
		else
			len = sprintf(buf, "%s\n",
				      session->leadconn->local_address);
		spin_unlock_bh(&session->lock);
		break;
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}
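
/*
 * iscsi_conn_get_stats - fill in the iSCSI connection counters plus three
 * driver-specific custom statistics.
 */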
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}
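
/*
 * iscsi_tcp_session_create - create a session and set up per-task data
 *
 * Points each command and management task's header at its TCP-private
 * header storage and allocates the R2T pools.
 */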
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
			 struct scsi_transport_template *scsit,
			 uint16_t cmds_max, uint16_t qdepth,
			 uint32_t initial_cmdsn, uint32_t *hostno)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	uint32_t hn;
	int cmd_i;

	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
					  sizeof(struct iscsi_tcp_cmd_task),
					  sizeof(struct iscsi_tcp_mgmt_task),
					  initial_cmdsn, &hn);
	if (!cls_session)
		return NULL;
	*hostno = hn;

	session = class_to_transport_session(cls_session);
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

		mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
	}

	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
		goto r2tpool_alloc_fail;

	return cls_session;

r2tpool_alloc_fail:
	iscsi_session_teardown(cls_session);
	return NULL;
}

static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	iscsi_r2tpool_free(class_to_transport_session(cls_session));
	iscsi_session_teardown(cls_session);
}
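
/*
 * iscsi_tcp_slave_configure - no bounce buffering or DMA alignment is
 * required since data is copied through the socket rather than DMAed
 * directly to the device.
 */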
static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
{
	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(sdev->request_queue, 0);
	return 0;
}

static struct scsi_host_template iscsi_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= 4096,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler= iscsi_eh_device_reset,
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering		= DISABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.slave_configure	= iscsi_tcp_slave_configure,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
};

static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				  ISCSI_HOST_INITIATOR_NAME |
				  ISCSI_HOST_NETDEV_NAME,
	.host_template		= &iscsi_sht,
	.conndata_size		= sizeof(struct iscsi_conn),
	.max_conn		= 1,
	.max_cmd_len		= 16,
	/* session management */
	.create_session		= iscsi_tcp_session_create,
	.destroy_session	= iscsi_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_tcp_conn_create,
	.bind_conn		= iscsi_tcp_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_conn_param		= iscsi_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_tcp_conn_stop,
	/* iscsi host params */
	.get_host_param		= iscsi_tcp_host_get_param,
	.set_host_param		= iscsi_host_set_param,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
	.init_cmd_task		= iscsi_tcp_ctask_init,
	.init_mgmt_task		= iscsi_tcp_mtask_init,
	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	if (!iscsi_register_transport(&iscsi_tcp_transport))
		return -ENODEV;

	return 0;
}

static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);