iser_memory.c

/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
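
/*
 * Note on the threshold above: bounce buffers of up to 128K are taken
 * from kmalloc(); anything larger falls back to __get_free_pages() with
 * a power-of-two order, see iser_start_rdma_unaligned_sg() below.
 */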

/**
 * iser_regd_buff_release - decrements the reference count of a
 * registered buffer and releases it when the count drops to zero.
 *
 * Returns 0 if released, 1 if the release was deferred.
 */
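
/*
 * Usage note: iser_reg_rdma_mem() below takes a reference on the regd
 * buf so that it is not released (e.g. by a send dto completion) before
 * the SCSI response arrives; the final release then performs the unmap.
 */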
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
        struct ib_device *dev;

        if ((atomic_read(&regd_buf->ref_count) == 0) ||
            atomic_dec_and_test(&regd_buf->ref_count)) {
                /* if we used the dma mr, unreg is just a NOP */
                if (regd_buf->reg.is_fmr)
                        iser_unreg_mem(&regd_buf->reg);

                if (regd_buf->dma_addr) {
                        dev = regd_buf->device->ib_device;
                        ib_dma_unmap_single(dev,
                                            regd_buf->dma_addr,
                                            regd_buf->data_size,
                                            regd_buf->direction);
                }
                /* else this regd buf is associated with a task that we
                 * dma_unmap_single/sg later */
                return 0;
        } else {
                iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
                return 1;
        }
}

/**
 * iser_reg_single - fills a registered buffer descriptor with
 * registration information
 */
void iser_reg_single(struct iser_device *device,
                     struct iser_regd_buf *regd_buf,
                     enum dma_data_direction direction)
{
        u64 dma_addr;

        dma_addr = ib_dma_map_single(device->ib_device,
                                     regd_buf->virt_addr,
                                     regd_buf->data_size, direction);
        BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));

        regd_buf->reg.lkey = device->mr->lkey;
        regd_buf->reg.len = regd_buf->data_size;
        regd_buf->reg.va = dma_addr;
        regd_buf->reg.is_fmr = 0;

        regd_buf->dma_addr = dma_addr;
        regd_buf->direction = direction;
}

/**
 * iser_start_rdma_unaligned_sg - allocates a contiguous bounce buffer
 * for a task whose scatterlist violates the RDMA alignment requirements,
 * copies the data into it for writes, and DMA-maps it as a single entry.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                        enum iser_data_dir cmd_dir)
{
        int dma_nents;
        struct ib_device *dev;
        char *mem = NULL;
        struct iser_data_buf *data = &iser_task->data[cmd_dir];
        unsigned long cmd_data_len = data->data_len;

        if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
                /* page order for a power-of-two sized allocation */
                mem = (void *)__get_free_pages(GFP_NOIO,
                      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
        else
                mem = kmalloc(cmd_data_len, GFP_NOIO);

        if (mem == NULL) {
                iser_err("Failed to allocate mem size %d %d for copying sglist\n",
                         data->size, (int)cmd_data_len);
                return -ENOMEM;
        }

        if (cmd_dir == ISER_DIR_OUT) {
                /* copy the unaligned sg into the buffer which is used for RDMA */
                struct scatterlist *sgl = (struct scatterlist *)data->buf;
                struct scatterlist *sg;
                int i;
                char *p, *from;

                p = mem;
                for_each_sg(sgl, sg, data->size, i) {
                        from = kmap_atomic(sg_page(sg), KM_USER0);
                        memcpy(p,
                               from + sg->offset,
                               sg->length);
                        kunmap_atomic(from, KM_USER0);
                        p += sg->length;
                }
        }

        sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
        iser_task->data_copy[cmd_dir].buf =
                &iser_task->data_copy[cmd_dir].sg_single;
        iser_task->data_copy[cmd_dir].size = 1;
        iser_task->data_copy[cmd_dir].copy_buf = mem;

        dev = iser_task->iser_conn->ib_conn->device->ib_device;
        dma_nents = ib_dma_map_sg(dev,
                                  &iser_task->data_copy[cmd_dir].sg_single,
                                  1,
                                  (cmd_dir == ISER_DIR_OUT) ?
                                  DMA_TO_DEVICE : DMA_FROM_DEVICE);
        BUG_ON(dma_nents == 0);

        iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
        return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - DMA-unmaps the bounce buffer, copies
 * received data back to the original unaligned scatterlist for reads,
 * and frees the buffer.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
                                     enum iser_data_dir cmd_dir)
{
        struct ib_device *dev;
        struct iser_data_buf *mem_copy;
        unsigned long cmd_data_len;

        dev = iser_task->iser_conn->ib_conn->device->ib_device;
        mem_copy = &iser_task->data_copy[cmd_dir];

        ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
                        (cmd_dir == ISER_DIR_OUT) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE);

        if (cmd_dir == ISER_DIR_IN) {
                char *mem;
                struct scatterlist *sgl, *sg;
                unsigned char *p, *to;
                unsigned int sg_size;
                int i;

                /* copy back read RDMA to unaligned sg */
                mem = mem_copy->copy_buf;

                sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
                sg_size = iser_task->data[ISER_DIR_IN].size;

                p = mem;
                for_each_sg(sgl, sg, sg_size, i) {
                        to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
                        memcpy(to + sg->offset,
                               p,
                               sg->length);
                        kunmap_atomic(to, KM_SOFTIRQ0);
                        p += sg->length;
                }
        }

        cmd_data_len = iser_task->data[cmd_dir].data_len;

        if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
                free_pages((unsigned long)mem_copy->copy_buf,
                           ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
        else
                kfree(mem_copy->copy_buf);

        mem_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
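
/*
 * MASK_4K and SIZE_4K are defined elsewhere (iscsi_iser.h); assuming
 * MASK_4K == ~(SIZE_4K - 1), IS_4K_ALIGNED() simply tests that the low
 * 12 bits of the address are zero, e.g. 0x12000 passes, 0x12200 fails.
 */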

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (which may
 * be less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where few fragments of the same page are present in the SG as
 * consecutive elements. In addition, it handles a one entry SG.
 */
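
/*
 * Worked example (illustrative values, assuming SIZE_4K == 4096): the
 * mapped SG entries [va 0x10000, len 0x200], [va 0x10200, len 0xE00] and
 * [va 0x11000, len 0x2000] form one contiguous region; the first two are
 * fragments of the same page and compact together, yielding the page vec
 * {0x10000, 0x11000, 0x12000} with offset 0 and data_size 0x3000.
 */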
static int iser_sg_to_page_vec(struct iser_data_buf *data,
                               struct iser_page_vec *page_vec,
                               struct ib_device *ibdev)
{
        struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
        u64 start_addr, end_addr, page, chunk_start = 0;
        unsigned long total_sz = 0;
        unsigned int dma_len;
        int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

        /* compute the offset of first element */
        page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

        new_chunk = 1;
        cur_page  = 0;
        for_each_sg(sgl, sg, data->dma_nents, i) {
                start_addr = ib_sg_dma_address(ibdev, sg);
                if (new_chunk)
                        chunk_start = start_addr;
                dma_len = ib_sg_dma_len(ibdev, sg);
                end_addr = start_addr + dma_len;
                total_sz += dma_len;

                /* collect page fragments until aligned or end of SG list */
                if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
                        new_chunk = 0;
                        continue;
                }
                new_chunk = 1;

                /* address of the first page in the contiguous chunk;
                   masking relevant for the very first SG entry,
                   which might be unaligned */
                page = chunk_start & MASK_4K;
                do {
                        page_vec->pages[cur_page++] = page;
                        page += SIZE_4K;
                } while (page < end_addr);
        }

        page_vec->data_size = total_sz;
        iser_dbg("page_vec->data_size:%d cur_page %d\n",
                 page_vec->data_size, cur_page);
        return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
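
/*
 * For example (illustrative addresses): the entries [0x10000, len 0x1000]
 * and [0x11000, len 0x1000] are contiguous and boundary-aligned, so both
 * count; appending [0x20800, len 0x1000] stops the scan at two aligned
 * entries, because the third neither continues the previous entry nor
 * starts on a 4K boundary.
 */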
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
                                     struct ib_device *ibdev)
{
        struct scatterlist *sgl, *sg, *next_sg = NULL;
        u64 start_addr, end_addr;
        int i, ret_len, start_check = 0;

        if (data->dma_nents == 1)
                return 1;

        sgl = (struct scatterlist *)data->buf;
        start_addr = ib_sg_dma_address(ibdev, sgl);

        for_each_sg(sgl, sg, data->dma_nents, i) {
                if (start_check && !IS_4K_ALIGNED(start_addr))
                        break;

                next_sg = sg_next(sg);
                if (!next_sg)
                        break;

                end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
                start_addr = ib_sg_dma_address(ibdev, next_sg);

                if (end_addr == start_addr) {
                        start_check = 0;
                        continue;
                } else
                        start_check = 1;

                if (!IS_4K_ALIGNED(end_addr))
                        break;
        }
        ret_len = (next_sg) ? i : i + 1;
        iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
                 ret_len, data->dma_nents, data);
        return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
                               struct ib_device *ibdev)
{
        struct scatterlist *sgl = (struct scatterlist *)data->buf;
        struct scatterlist *sg;
        int i;

        if (iser_debug_level == 0)
                return;

        for_each_sg(sgl, sg, data->dma_nents, i)
                iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
                          "off:0x%x sz:0x%x dma_len:0x%x\n",
                          i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                          sg_page(sg), sg->offset,
                          sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
        int i;

        iser_err("page vec length %d data size %d\n",
                 page_vec->length, page_vec->data_size);
        for (i = 0; i < page_vec->length; i++)
                iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
                                struct iser_page_vec *page_vec,
                                struct ib_device *ibdev)
{
        int page_vec_len = 0;

        page_vec->length = 0;
        page_vec->offset = 0;

        iser_dbg("Translating sg sz: %d\n", data->dma_nents);
        page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
        iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

        page_vec->length = page_vec_len;

        if (page_vec_len * SIZE_4K < page_vec->data_size) {
                iser_err("page_vec too short to hold this SG\n");
                iser_data_buf_dump(data, ibdev);
                iser_dump_page_vec(page_vec);
                BUG();
        }
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           struct iser_data_buf *data,
                           enum iser_data_dir iser_dir,
                           enum dma_data_direction dma_dir)
{
        struct ib_device *dev;

        iser_task->dir[iser_dir] = 1;
        dev = iser_task->iser_conn->ib_conn->device->ib_device;

        data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
        if (data->dma_nents == 0) {
                iser_err("dma_map_sg failed!!!\n");
                return -EINVAL;
        }
        return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
        struct ib_device *dev;
        struct iser_data_buf *data;

        dev = iser_task->iser_conn->ib_conn->device->ib_device;

        if (iser_task->dir[ISER_DIR_IN]) {
                data = &iser_task->data[ISER_DIR_IN];
                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                data = &iser_task->data[ISER_DIR_OUT];
                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
        }
}

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining the rkey and va
 *
 * Returns 0 on success, errno code on failure.
 */
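
/*
 * Registration strategy, as implemented below: an unaligned scatterlist
 * is first replaced by a DMA-mapped bounce buffer; a single-entry mapping
 * then uses the device's global DMA memory region directly, while
 * multi-entry mappings are registered through an FMR built from the
 * page vector.
 */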
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
                      enum iser_data_dir cmd_dir)
{
        struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
        struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct ib_device *ibdev = device->ib_device;
        struct iser_data_buf *mem = &iser_task->data[cmd_dir];
        struct iser_regd_buf *regd_buf;
        int aligned_len;
        int err;
        int i;
        struct scatterlist *sg;

        regd_buf = &iser_task->rdma_regd[cmd_dir];

        aligned_len = iser_data_buf_aligned_len(mem, ibdev);
        if (aligned_len != mem->dma_nents) {
                iscsi_conn->fmr_unalign_cnt++;
                iser_warn("rdma alignment violation %d/%d aligned\n",
                          aligned_len, mem->size);
                iser_data_buf_dump(mem, ibdev);

                /* unmap the command data before accessing it */
                iser_dma_unmap_task_data(iser_task);

                /* allocate copy buf; if we are writing, copy the
                 * unaligned scatterlist, then dma map the copy */
                if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
                        return -ENOMEM;
                mem = &iser_task->data_copy[cmd_dir];
        }

        /* if there is a single dma entry, FMR is not needed */
        if (mem->dma_nents == 1) {
                sg = (struct scatterlist *)mem->buf;

                regd_buf->reg.lkey = device->mr->lkey;
                regd_buf->reg.rkey = device->mr->rkey;
                regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
                regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
                regd_buf->reg.is_fmr = 0;

                iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
                         "va: 0x%08lX sz: %ld\n",
                         (unsigned int)regd_buf->reg.lkey,
                         (unsigned int)regd_buf->reg.rkey,
                         (unsigned long)regd_buf->reg.va,
                         (unsigned long)regd_buf->reg.len);
        } else { /* use FMR for multiple dma entries */
                iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
                err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
                if (err) {
                        iser_data_buf_dump(mem, ibdev);
                        iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
                                 mem->dma_nents,
                                 ntoh24(iser_task->desc.iscsi_header.dlength));
                        iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
                                 ib_conn->page_vec->data_size,
                                 ib_conn->page_vec->length,
                                 ib_conn->page_vec->offset);
                        for (i = 0; i < ib_conn->page_vec->length; i++)
                                iser_err("page_vec[%d] = 0x%llx\n", i,
                                         (unsigned long long)ib_conn->page_vec->pages[i]);
                        return err;
                }
        }

        /* take a reference on this regd buf such that it will not be released *
         * (e.g. in send dto completion) before we get the scsi response       */
        atomic_inc(&regd_buf->ref_count);
        return 0;
}