@@ -45,6 +45,8 @@ struct kmmio_fault_page {
 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
+
+	bool scheduled_for_release;
 };
 
 struct kmmio_delayed_release {
@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
 	BUG_ON(f->count < 0);
 	if (!f->count) {
 		disarm_kmmio_fault_page(f);
-		f->release_next = *release_list;
-		*release_list = f;
+		if (!f->scheduled_for_release) {
+			f->release_next = *release_list;
+			*release_list = f;
+			f->scheduled_for_release = true;
+		}
 	}
 }
 
@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 			prevp = &f->release_next;
 		} else {
 			*prevp = f->release_next;
+			f->release_next = NULL;
+			f->scheduled_for_release = false;
 		}
-		f = f->release_next;
+		f = *prevp;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 	kmmio_count--;
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
+	if (!release_list)
+		return;
+
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
 		pr_crit("leaking kmmio_fault_page objects.\n");
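
A minimal user-space sketch of the guard-flag pattern the patch applies, assuming only what the hunks above show: a kmmio_fault_page must never be linked into more than one release list at a time, so scheduled_for_release records whether it is already queued and a repeated attempt becomes a no-op. The names fault_page and schedule_release below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct kmmio_fault_page; only the fields the pattern needs. */
struct fault_page {
	int count;                     /* registration count, as in the kernel struct */
	bool scheduled_for_release;    /* true while linked into a release list */
	struct fault_page *release_next;
};

/*
 * Mirrors the patched release_kmmio_fault_page(): link the page into the
 * singly linked release list only if it is not already scheduled there.
 */
static void schedule_release(struct fault_page *f, struct fault_page **release_list)
{
	if (f->count)
		return;
	if (!f->scheduled_for_release) {
		f->release_next = *release_list;
		*release_list = f;
		f->scheduled_for_release = true;
	}
}

int main(void)
{
	struct fault_page f = { .count = 0 };
	struct fault_page *release_list = NULL;

	schedule_release(&f, &release_list);
	schedule_release(&f, &release_list);	/* duplicate call: no double insertion */

	printf("linked exactly once: %d\n", release_list == &f && f.release_next == NULL);
	return 0;
}

The same concern motivates the new early return in unregister_kmmio_probe(): when release_list stayed empty, nothing was scheduled for release, so there is no reason to allocate a kmmio_delayed_release.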