multicalls.c

/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for any data the
 * arguments point to (for passing pointers to structures, etc.).
 * When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
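/*
 * Usage sketch (illustrative; not part of the original file).  Callers
 * normally go through the inline wrappers in multicalls.h: xen_mc_batch()
 * opens a batch with interrupts disabled, xen_mc_entry() hands back a
 * slot plus argument space, and xen_mc_issue() flushes unless a lazy
 * mode is active.  Modelled on the MMU callers in arch/x86/xen/mmu.c;
 * the ptr/val assignments are placeholders:
 *
 *	struct multicall_space mcs;
 *	struct mmu_update *u;
 *
 *	xen_mc_batch();
 *	mcs = xen_mc_entry(sizeof(*u));
 *	u = mcs.args;
 *	u->ptr = ...;
 *	u->val = ...;
 *	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */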
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_DEBUG	1

#define MC_ARGS		(MC_BATCH * 16)

struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];	/* snapshot for the failure dump */
#endif
	unsigned char args[MC_ARGS];		/* argument space handed out below */
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];			/* run once the batch is flushed */
	unsigned mcidx, argidx, cbidx;		/* fill levels of the three arrays */
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* flush reasons: 0 - slots, 1 - args, 2 - callbacks */
enum flush_reasons
{
	FL_SLOTS,
	FL_ARGS,
	FL_CALLBACKS,

	FL_N_REASONS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define NHYPERCALLS	40		/* not really */

static struct {
	unsigned histo[MC_BATCH+1];

	unsigned issued;
	unsigned arg_total;
	unsigned hypercalls;
	unsigned histo_hypercalls[NHYPERCALLS];

	unsigned flush[FL_N_REASONS];
} mc_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mc_stats, 0, sizeof(mc_stats));
		zero_stats = 0;
	}
}

static void mc_add_stats(const struct mc_buffer *mc)
{
	int i;

	check_zero();

	mc_stats.issued++;
	mc_stats.hypercalls += mc->mcidx;
	mc_stats.arg_total += mc->argidx;

	mc_stats.histo[mc->mcidx]++;
	for (i = 0; i < mc->mcidx; i++) {
		unsigned op = mc->entries[i].op;
		if (op < NHYPERCALLS)
			mc_stats.histo_hypercalls[op]++;
	}
}

static void mc_stats_flush(enum flush_reasons idx)
{
	check_zero();

	mc_stats.flush[idx]++;
}

#else  /* !CONFIG_XEN_DEBUG_FS */

static inline void mc_add_stats(const struct mc_buffer *mc)
{
}

static inline void mc_stats_flush(enum flush_reasons idx)
{
}
#endif	/* CONFIG_XEN_DEBUG_FS */

void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	mc_add_stats(b);

	if (b->mcidx) {
#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			dump_stack();
			for (i = 0; i < b->mcidx; i++) {
				printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	BUG_ON(ret);
}

struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) > MC_ARGS) {
		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}
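/*
 * Allocation example (a sketch, modelled on xen_flush_tlb() in
 * arch/x86/xen/mmu.c): the args space returned above holds the operand
 * structure the queued entry points at, so it must stay valid until the
 * batch is flushed; that is why it lives in the per-cpu buffer rather
 * than on the caller's stack:
 *
 *	struct mmuext_op *op;
 *	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 *
 *	op = mcs.args;
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */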

struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) > MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}
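/*
 * Extend-or-start pattern (a sketch, modelled on xen_extend_mmu_update()
 * in arch/x86/xen/mmu.c): if the previous entry is the same hypercall,
 * grow its argument list in place and bump the count argument set up by
 * MULTI_mmu_update(); otherwise fall back to a fresh entry:
 *
 *	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 *	if (mcs.mc != NULL)
 *		mcs.mc->args[1]++;
 *	else {
 *		mcs = __xen_mc_entry(sizeof(*u));
 *		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
 *	}
 *	u = mcs.args;
 *	*u = *update;
 */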

void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		mc_stats_flush(FL_CALLBACKS);
		xen_mc_flush();
	}

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}
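/*
 * Callback sketch (illustrative): defer work until the batch has really
 * been issued, e.g. releasing a lock that protects data a queued
 * hypercall still refers to, as the pte-lock release in
 * arch/x86/xen/mmu.c does:
 *
 *	static void xen_pte_unlock(void *v)
 *	{
 *		spinlock_t *ptl = v;
 *		spin_unlock(ptl);
 *	}
 *
 *	(queue a hypercall touching the locked pte page, then:)
 *	xen_mc_callback(xen_pte_unlock, ptl);
 */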

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

static int __init xen_mc_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mc_debug = debugfs_create_dir("multicalls", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
	debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
				     mc_stats.histo, MC_BATCH);
	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
				     mc_stats.histo_hypercalls, NHYPERCALLS);
	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
				     mc_stats.flush, FL_N_REASONS);

	return 0;
}
fs_initcall(xen_mc_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */