@@ -398,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-	int serial = atomic_inc_return(&pool->req_ser);
+	int serial;
+	struct ib_pool_fmr *fmr, *next;
+
+	/*
+	 * The free_list holds FMRs that may have been used
+	 * but have not been remapped enough times to be dirty.
+	 * Put them on the dirty list now so that the cleanup
+	 * thread will reap them too.
+	 */
+	spin_lock_irq(&pool->pool_lock);
+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+		if (fmr->remap_count > 0)
+			list_move(&fmr->list, &pool->dirty_list);
+	}
+	spin_unlock_irq(&pool->pool_lock);
 
+	serial = atomic_inc_return(&pool->req_ser);
 	wake_up_process(pool->thread);
 
 	if (wait_event_interruptible(pool->force_wait,