  1. /*
  2. * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
  3. *
  4. * Copyright (c) 2008 Chelsio Communications, Inc.
  5. * Copyright (c) 2008 Mike Christie
  6. * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation.
  11. *
  12. * Written by: Karen Xie (kxie@chelsio.com)
  13. */
  14. #include <linux/slab.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/crypto.h>
  17. #include <scsi/scsi_cmnd.h>
  18. #include <scsi/scsi_host.h>
  19. #include "cxgb3i.h"
  20. #include "cxgb3i_pdu.h"
/* compile-time rx/tx trace macros; no-ops unless the debug flag is set */
#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif
/* always allocate rooms for AHS */
#define SKB_TX_PDU_HEADER_LEN	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

/* extra headroom reserved in tx skbs so payload can be copied inline;
 * 0 means payload always goes into page frags (set in cxgb3i_pdu_init) */
static unsigned int skb_extra_headroom;
/* shared zero-filled page used as the source of pdu pad bytes */
static struct page *pad_page;
  36. /*
  37. * pdu receive, interact with libiscsi_tcp
  38. */
  39. static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
  40. unsigned int offset, int offloaded)
  41. {
  42. int status = 0;
  43. int bytes_read;
  44. bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
  45. switch (status) {
  46. case ISCSI_TCP_CONN_ERR:
  47. return -EIO;
  48. case ISCSI_TCP_SUSPENDED:
  49. /* no transfer - just have caller flush queue */
  50. return bytes_read;
  51. case ISCSI_TCP_SKB_DONE:
  52. /*
  53. * pdus should always fit in the skb and we should get
  54. * segment done notifcation.
  55. */
  56. iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
  57. return -EFAULT;
  58. case ISCSI_TCP_SEGMENT_DONE:
  59. return bytes_read;
  60. default:
  61. iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
  62. "status %d\n", status);
  63. return -EINVAL;
  64. }
  65. }
  66. static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
  67. struct sk_buff *skb)
  68. {
  69. struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
  70. bool offloaded = 0;
  71. unsigned int offset;
  72. int rc;
  73. cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
  74. conn, skb, skb->len, skb_ulp_mode(skb));
  75. if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
  76. iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
  77. return -EIO;
  78. }
  79. if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
  80. iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
  81. return -EIO;
  82. }
  83. if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
  84. iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
  85. return -EIO;
  86. }
  87. /* iscsi hdr */
  88. rc = read_pdu_skb(conn, skb, 0, 0);
  89. if (rc <= 0)
  90. return rc;
  91. if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
  92. return 0;
  93. offset = rc;
  94. if (conn->hdrdgst_en)
  95. offset += ISCSI_DIGEST_SIZE;
  96. /* iscsi data */
  97. if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
  98. cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
  99. "itt 0x%x.\n",
  100. skb,
  101. tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
  102. tcp_conn->in.datalen,
  103. ntohl(tcp_conn->in.hdr->itt));
  104. offloaded = 1;
  105. } else {
  106. cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
  107. "itt 0x%x.\n",
  108. skb,
  109. tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
  110. tcp_conn->in.datalen,
  111. ntohl(tcp_conn->in.hdr->itt));
  112. offset += sizeof(struct cpl_iscsi_hdr_norss);
  113. }
  114. rc = read_pdu_skb(conn, skb, offset, offloaded);
  115. if (rc < 0)
  116. return rc;
  117. else
  118. return 0;
  119. }
  120. /*
  121. * pdu transmit, interact with libiscsi_tcp
  122. */
  123. static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
  124. {
  125. u8 submode = 0;
  126. if (hcrc)
  127. submode |= 1;
  128. if (dcrc)
  129. submode |= 2;
  130. skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
  131. }
/*
 * cxgb3i_conn_cleanup_task - release per-task tx state
 * @task: iscsi task being torn down
 *
 * Frees the pre-allocated tx skb if the task never made it to the xmit
 * callout, clears the driver's per-task area, returns the ddp itt, and
 * lets libiscsi_tcp finish the cleanup.
 */
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
	/* driver data is laid out immediately after the iscsi_tcp_task area */
	struct cxgb3i_task_data *tdata = task->dd_data +
					sizeof(struct iscsi_tcp_task);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(struct cxgb3i_task_data));

	/* MNC - Do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu has never been called on the task */
	cxgb3i_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
  145. static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
  146. unsigned int offset, unsigned int *off,
  147. struct scatterlist **sgp)
  148. {
  149. int i;
  150. struct scatterlist *sg;
  151. for_each_sg(sgl, sg, sgcnt, i) {
  152. if (offset < sg->length) {
  153. *off = offset;
  154. *sgp = sg;
  155. return 0;
  156. }
  157. offset -= sg->length;
  158. }
  159. return -EFAULT;
  160. }
/*
 * sgl_read_to_frags - map a byte range of a scatterlist onto skb frags
 * @sg: sgl entry where the range starts
 * @sgoffset: byte offset into @sg at which the range begins
 * @dlen: total number of bytes to map
 * @frags: output fragment array
 * @frag_max: capacity of @frags
 *
 * Walks the sgl, merging pieces that are contiguous on the same page into
 * a single fragment.  Returns the number of fragments filled in, or
 * -EINVAL if the sgl runs out of data or @frags overflows.
 */
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;	/* bytes still to map */
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			/* current entry fully consumed - advance */
			sg = sg_next(sg);
			if (!sg) {
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		/* extend the previous frag when this piece is contiguous
		 * with it on the same page */
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}
			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);
	return i;
}
/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb and pdu header for a task
 * @task: iscsi task
 * @opcode: iscsi opcode of the pdu being built
 *
 * Allocates an skb with room for the h/w tx header plus the iscsi header
 * (AHS included); for write-direction pdus extra headroom is reserved so
 * the payload can later be copied inline (see cxgb3i_conn_init_pdu).
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;
	/* write command, need to send data pdus */
	/* note: sc is only dereferenced for SCSI_CMD opcodes, where it is
	 * expected to be set */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	/* leave room for the h/w tx header in front of the iscsi header */
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
  233. int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
  234. unsigned int count)
  235. {
  236. struct iscsi_conn *conn = task->conn;
  237. struct iscsi_tcp_task *tcp_task = task->dd_data;
  238. struct cxgb3i_task_data *tdata = tcp_task->dd_data;
  239. struct sk_buff *skb = tdata->skb;
  240. unsigned int datalen = count;
  241. int i, padlen = iscsi_padding(count);
  242. struct page *pg;
  243. cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
  244. task, task->sc, offset, count, skb);
  245. skb_put(skb, task->hdr_len);
  246. tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
  247. if (!count)
  248. return 0;
  249. if (task->sc) {
  250. struct scsi_data_buffer *sdb = scsi_out(task->sc);
  251. struct scatterlist *sg = NULL;
  252. int err;
  253. tdata->offset = offset;
  254. tdata->count = count;
  255. err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
  256. tdata->offset, &tdata->sgoffset, &sg);
  257. if (err < 0) {
  258. cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
  259. sdb->table.nents, tdata->offset,
  260. sdb->length);
  261. return err;
  262. }
  263. err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
  264. tdata->frags, MAX_PDU_FRAGS);
  265. if (err < 0) {
  266. cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
  267. sdb->table.nents, tdata->offset,
  268. tdata->count);
  269. return err;
  270. }
  271. tdata->nr_frags = err;
  272. if (tdata->nr_frags > MAX_SKB_FRAGS ||
  273. (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
  274. char *dst = skb->data + task->hdr_len;
  275. skb_frag_t *frag = tdata->frags;
  276. /* data fits in the skb's headroom */
  277. for (i = 0; i < tdata->nr_frags; i++, frag++) {
  278. char *src = kmap_atomic(frag->page,
  279. KM_SOFTIRQ0);
  280. memcpy(dst, src+frag->page_offset, frag->size);
  281. dst += frag->size;
  282. kunmap_atomic(src, KM_SOFTIRQ0);
  283. }
  284. if (padlen) {
  285. memset(dst, 0, padlen);
  286. padlen = 0;
  287. }
  288. skb_put(skb, count + padlen);
  289. } else {
  290. /* data fit into frag_list */
  291. for (i = 0; i < tdata->nr_frags; i++)
  292. get_page(tdata->frags[i].page);
  293. memcpy(skb_shinfo(skb)->frags, tdata->frags,
  294. sizeof(skb_frag_t) * tdata->nr_frags);
  295. skb_shinfo(skb)->nr_frags = tdata->nr_frags;
  296. skb->len += count;
  297. skb->data_len += count;
  298. skb->truesize += count;
  299. }
  300. } else {
  301. pg = virt_to_page(task->data);
  302. get_page(pg);
  303. skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
  304. count);
  305. skb->len += count;
  306. skb->data_len += count;
  307. skb->truesize += count;
  308. }
  309. if (padlen) {
  310. i = skb_shinfo(skb)->nr_frags;
  311. get_page(pad_page);
  312. skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
  313. padlen);
  314. skb->data_len += padlen;
  315. skb->truesize += padlen;
  316. skb->len += padlen;
  317. }
  318. return 0;
  319. }
  320. int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
  321. {
  322. struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
  323. struct cxgb3i_conn *cconn = tcp_conn->dd_data;
  324. struct iscsi_tcp_task *tcp_task = task->dd_data;
  325. struct cxgb3i_task_data *tdata = tcp_task->dd_data;
  326. struct sk_buff *skb = tdata->skb;
  327. unsigned int datalen;
  328. int err;
  329. if (!skb)
  330. return 0;
  331. datalen = skb->data_len;
  332. tdata->skb = NULL;
  333. err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
  334. if (err > 0) {
  335. int pdulen = err;
  336. cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
  337. task, skb, skb->len, skb->data_len, err);
  338. if (task->conn->hdrdgst_en)
  339. pdulen += ISCSI_DIGEST_SIZE;
  340. if (datalen && task->conn->datadgst_en)
  341. pdulen += ISCSI_DIGEST_SIZE;
  342. task->conn->txdata_octets += pdulen;
  343. return 0;
  344. }
  345. if (err == -EAGAIN || err == -ENOBUFS) {
  346. /* reset skb to send when we are called again */
  347. tdata->skb = skb;
  348. return err;
  349. }
  350. kfree_skb(skb);
  351. cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
  352. task->itt, skb, skb->len, skb->data_len, err);
  353. iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
  354. iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
  355. return err;
  356. }
  357. int cxgb3i_pdu_init(void)
  358. {
  359. if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
  360. skb_extra_headroom = SKB_TX_HEADROOM;
  361. pad_page = alloc_page(GFP_KERNEL);
  362. if (!pad_page)
  363. return -ENOMEM;
  364. memset(page_address(pad_page), 0, PAGE_SIZE);
  365. return 0;
  366. }
  367. void cxgb3i_pdu_cleanup(void)
  368. {
  369. if (pad_page) {
  370. __free_page(pad_page);
  371. pad_page = NULL;
  372. }
  373. }
/*
 * cxgb3i_conn_pdu_ready - rx callback from the offloaded tcp connection
 * @c3cn: offload connection with complete pdus queued on receive_queue
 *
 * Drains the receive queue, feeding each skb (one pdu each) to
 * libiscsi_tcp, then returns the consumed byte count to the h/w as rx
 * credits.  c3cn->callback_lock guards against the iscsi connection being
 * unbound concurrently.
 */
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;	/* total wire bytes consumed this call */
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	/* stop at the first processing error; remaining skbs stay queued */
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	/* NOTE(review): conn is dereferenced below after callback_lock is
	 * dropped - presumably rx is quiesced before unbind; confirm. */
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
  408. void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
  409. {
  410. struct iscsi_conn *conn = c3cn->user_data;
  411. cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
  412. if (conn) {
  413. cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
  414. iscsi_conn_queue_work(conn);
  415. }
  416. }
/*
 * cxgb3i_conn_closing - offload connection is closing
 * @c3cn: offloaded tcp connection
 *
 * Fails the bound iscsi connection when the tcp connection goes away
 * while not in the ESTABLISHED state.  callback_lock keeps user_data
 * stable against a concurrent unbind.
 */
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn;

	read_lock(&c3cn->callback_lock);
	conn = c3cn->user_data;
	if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	read_unlock(&c3cn->callback_lock);
}