/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <ib_fmr_pool.h>

#include "core_priv.h"

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
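
/*
 * For reference, the members discussed above live in struct
 * ib_pool_fmr, which is declared in <ib_fmr_pool.h>, not in this
 * file.  Roughly (see the header for the authoritative definition):
 *
 *      struct ib_pool_fmr {
 *              struct ib_fmr      *fmr;
 *              struct ib_fmr_pool *pool;
 *              struct list_head    list;
 *              struct hlist_node   cache_node;
 *              int                 ref_count;
 *              int                 remap_count;
 *              u64                 io_virtual_address;
 *              int                 page_list_len;
 *              u64                 page_list[0];
 *      };
 */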

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                    void              *arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};
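
/*
 * How a forced flush is coordinated (as implemented below):
 * ib_flush_fmr_pool() bumps req_ser and wakes the cleanup thread; the
 * thread runs ib_fmr_batch_release(), bumps flush_ser and wakes
 * force_wait.  A flusher therefore sleeps until flush_ser has caught
 * up with req_ser, i.e. until a batch release that started after its
 * request has completed.
 */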

static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                (IB_FMR_HASH_SIZE - 1);
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len == fmr->page_list_len &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (pool->dirty_len >= pool->dirty_watermark ||
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (pool->dirty_len < pool->dirty_watermark &&
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_WARNING "Device %s does not support fast memory regions\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_create(ib_fmr_cleanup_thread,
                                      pool,
                                      "ib_fmr(%s)",
                                      device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr attr = {
                        .max_pages = params->max_pages_per_fmr,
                        .max_maps  = IB_FMR_MAX_REMAPS,
                        .page_size = PAGE_SHIFT
                };

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                                      GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING "failed to allocate fmr struct "
                                       "for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool        = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count   = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING "fmr_create failed for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
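
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * consumer might create a pool.  The function name and the numeric
 * parameters are hypothetical choices; the ib_fmr_pool_param fields
 * are assumed to match <ib_fmr_pool.h>, and the access flags are the
 * usual IB_ACCESS_* constants from the verbs layer.
 */
#if 0
static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
        struct ib_fmr_pool_param params = {
                .max_pages_per_fmr = 64,        /* pages per mapping */
                .access            = IB_ACCESS_LOCAL_WRITE |
                                     IB_ACCESS_REMOTE_WRITE |
                                     IB_ACCESS_REMOTE_READ,
                .pool_size         = 256,       /* FMRs preallocated up front */
                .dirty_watermark   = 32,        /* batch unmaps once this many are dirty */
                .flush_function    = NULL,      /* no flush notification needed */
                .flush_arg         = NULL,
                .cache             = 1          /* enable page-list caching */
        };

        /* Returns a valid pool pointer or an ERR_PTR() value on failure. */
        return ib_create_fmr_pool(pd, &params);
}
#endif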

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        int i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);

        return 0;
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;

        atomic_inc(&pool->req_ser);
        /*
         * It's OK if someone else bumps req_ser again here -- we'll
         * just wait a little longer.
         */
        serial = atomic_read(&pool->req_ser);

        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) -
                                     atomic_read(&pool->req_ser) >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                *io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  *io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 *io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = *io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
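
/*
 * Illustrative sketch (not part of the original file): mapping a
 * DMA-address list through the pool.  The function and variable names
 * here are hypothetical; error handling is up to the caller.
 */
#if 0
static int example_map_pages(struct ib_fmr_pool *pool,
                             u64 *dma_pages, int npages, u64 io_addr)
{
        struct ib_pool_fmr *pool_fmr;

        pool_fmr = ib_fmr_pool_map_phys(pool, dma_pages, npages, &io_addr);
        if (IS_ERR(pool_fmr))
                return PTR_ERR(pool_fmr);       /* e.g. -EAGAIN if the free list is empty */

        /*
         * pool_fmr->fmr->lkey / rkey can now be used in work requests
         * covering [io_addr, io_addr + npages * PAGE_SIZE).
         */

        return 0;
}
#endif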

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        ++pool->dirty_len;
                        wake_up_process(pool->thread);
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
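
/*
 * Illustrative sketch (not part of the original file): the release
 * side of the API.  The surrounding names are hypothetical; the call
 * sequence simply mirrors the functions above.
 */
#if 0
static void example_release(struct ib_fmr_pool *pool, struct ib_pool_fmr *pool_fmr)
{
        /* Drop our reference; the mapping may stay live until the FMR is reused. */
        ib_fmr_pool_unmap(pool_fmr);

        /* Force all unmapped FMRs to be invalidated before proceeding. */
        ib_flush_fmr_pool(pool);

        /* On teardown, release every FMR and the pool itself. */
        ib_destroy_fmr_pool(pool);
}
#endif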