@@ -32,7 +32,11 @@
 struct mc_buffer {
 	struct multicall_entry entries[MC_BATCH];
 	u64 args[MC_ARGS];
-	unsigned mcidx, argidx;
+	struct callback {
+		void (*fn)(void *);
+		void *data;
+	} callbacks[MC_BATCH];
+	unsigned mcidx, argidx, cbidx;
 };
 
 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
@@ -43,6 +47,7 @@ void xen_mc_flush(void)
 	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
 	int ret = 0;
 	unsigned long flags;
+	int i;
 
 	BUG_ON(preemptible());
 
@@ -51,8 +56,6 @@ void xen_mc_flush(void)
 	local_irq_save(flags);
 
 	if (b->mcidx) {
-		int i;
-
 		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
 			BUG();
 		for (i = 0; i < b->mcidx; i++)
@@ -65,6 +68,13 @@ void xen_mc_flush(void)
 
 	local_irq_restore(flags);
 
+	for(i = 0; i < b->cbidx; i++) {
+		struct callback *cb = &b->callbacks[i];
+
+		(*cb->fn)(cb->data);
+	}
+	b->cbidx = 0;
+
 	BUG_ON(ret);
 }
 
@@ -88,3 +98,16 @@ struct multicall_space __xen_mc_entry(size_t args)
 
 	return ret;
 }
+
+void xen_mc_callback(void (*fn)(void *), void *data)
+{
+	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+	struct callback *cb;
+
+	if (b->cbidx == MC_BATCH)
+		xen_mc_flush();
+
+	cb = &b->callbacks[b->cbidx++];
+	cb->fn = fn;
+	cb->data = data;
+}