umem.c
/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_mem.c 2743 2005-06-28 22:27:59Z roland $
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>

#include "uverbs.h"

/*
 * Unpin all pages in a umem: unmap each chunk's scatterlist, mark the
 * pages dirty if requested (and the mapping is writable), drop the page
 * references, and free the chunks.
 */
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct ib_umem_chunk *chunk, *tmp;
	int i;

	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
		ib_dma_unmap_sg(dev, chunk->page_list,
				chunk->nents, DMA_BIDIRECTIONAL);
		for (i = 0; i < chunk->nents; ++i) {
			if (umem->writable && dirty)
				set_page_dirty_lock(chunk->page_list[i].page);
			put_page(chunk->page_list[i].page);
		}

		kfree(chunk);
	}
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct ib_umem_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kmalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->offset    = addr & ~PAGE_MASK;
	umem->page_size = PAGE_SIZE;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
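	/*
	 * For example: an access mask of just IB_ACCESS_REMOTE_READ
	 * leaves writable == 0 below, while any mask that includes
	 * IB_ACCESS_LOCAL_WRITE or IB_ACCESS_REMOTE_WRITE yields
	 * writable == 1.
	 */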
	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

	INIT_LIST_HEAD(&umem->chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->locked_vm;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(int, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, NULL);

		if (ret < 0)
			goto out;

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
			for (i = 0; i < chunk->nents; ++i) {
				chunk->page_list[i].page   = page_list[i + off];
				chunk->page_list[i].offset = 0;
				chunk->page_list[i].length = PAGE_SIZE;
			}

			chunk->nmap = ib_dma_map_sg(context->device,
						    &chunk->page_list[0],
						    chunk->nents,
						    DMA_BIDIRECTIONAL);
			if (chunk->nmap <= 0) {
				for (i = 0; i < chunk->nents; ++i)
					put_page(chunk->page_list[i].page);
				kfree(chunk);

				ret = -ENOMEM;
				goto out;
			}

			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, &umem->chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		__ib_umem_release(context->device, umem, 0);
		kfree(umem);
	} else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
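
/*
 * Example usage (a minimal illustrative sketch; the names my_mr and
 * my_reg_user_mr are hypothetical): a driver's reg_user_mr method
 * typically pins the user buffer with ib_umem_get() and keeps the
 * returned umem until the MR is destroyed:
 *
 *	static struct ib_mr *my_reg_user_mr(struct ib_pd *pd, u64 start,
 *					    u64 length, u64 virt_addr,
 *					    int access, struct ib_udata *udata)
 *	{
 *		struct my_mr *mr;
 *
 *		mr = kzalloc(sizeof *mr, GFP_KERNEL);
 *		if (!mr)
 *			return ERR_PTR(-ENOMEM);
 *
 *		mr->umem = ib_umem_get(pd->uobject->context, start, length,
 *				       access);
 *		if (IS_ERR(mr->umem)) {
 *			int err = PTR_ERR(mr->umem);
 *
 *			kfree(mr);
 *			return ERR_PTR(err);
 *		}
 *
 *		(program the HCA with the umem's DMA addresses here)
 *
 *		return &mr->ibmr;
 *	}
 */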

/* Deferred locked_vm accounting, run from the system workqueue (see below). */
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	unsigned long diff;

	__ib_umem_release(umem->context->device, umem, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(umem);
		return;
	}

	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			schedule_work(&umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
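
/**
 * ib_umem_page_count - return the number of DMA-mapped pages in a umem
 * @umem: umem whose pages are to be counted
 *
 * Sums the DMA length of every mapped scatterlist entry in the umem's
 * chunk list, expressed in units of the umem's page size.
 */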
int ib_umem_page_count(struct ib_umem *umem)
{
	struct ib_umem_chunk *chunk;
	int shift;
	int i;
	int n;

	shift = ilog2(umem->page_size);

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i)
			n += sg_dma_len(&chunk->page_list[i]) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
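
/*
 * Example (an illustrative sketch with hypothetical variable names): a
 * driver that needs the buffer's DMA addresses can size its page table
 * with ib_umem_page_count() and then walk the umem's chunk list:
 *
 *	struct ib_umem_chunk *chunk;
 *	int shift = ilog2(umem->page_size);
 *	int npages = ib_umem_page_count(umem);
 *	u64 *pages = kmalloc(npages * sizeof *pages, GFP_KERNEL);
 *	int i, j, k = 0;
 *
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	list_for_each_entry(chunk, &umem->chunk_list, list)
 *		for (j = 0; j < chunk->nmap; ++j) {
 *			int len = sg_dma_len(&chunk->page_list[j]) >> shift;
 *
 *			for (i = 0; i < len; ++i)
 *				pages[k++] = sg_dma_address(&chunk->page_list[j]) +
 *					umem->page_size * i;
 *		}
 */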