multicalls.c

/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with scratch space that the arguments
 * can point to (for passing pointers to structures, etc).  When the
 * multicall is actually issued, all the space for the commands and
 * their argument data is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
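
/*
 * Rough usage sketch (not part of this file): callers such as
 * arch/x86/xen/mmu.c normally go through the inline helpers in
 * multicalls.h rather than calling __xen_mc_entry() directly; treat
 * the exact op and wrapper names below as illustrative:
 *
 *	struct multicall_space mcs;
 *	struct mmuext_op *op;
 *
 *	mcs = xen_mc_entry(sizeof(*op));    reserve a slot + argument space
 *	op = mcs.args;
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);    flush now unless in lazy MMU mode
 */
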
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_DEBUG	1

#define MC_ARGS		(MC_BATCH * 16)
struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];
	void *caller[MC_BATCH];
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;	/* entries, arg bytes, callbacks in use */
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* flush reasons 0- slots, 1- args, 2- callbacks */
enum flush_reasons
{
	FL_SLOTS,
	FL_ARGS,
	FL_CALLBACKS,

	FL_N_REASONS
};
#ifdef CONFIG_XEN_DEBUG_FS
#define NHYPERCALLS	40		/* not really */

static struct {
	unsigned histo[MC_BATCH+1];

	unsigned issued;
	unsigned arg_total;
	unsigned hypercalls;
	unsigned histo_hypercalls[NHYPERCALLS];

	unsigned flush[FL_N_REASONS];
} mc_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mc_stats, 0, sizeof(mc_stats));
		zero_stats = 0;
	}
}

static void mc_add_stats(const struct mc_buffer *mc)
{
	int i;

	check_zero();

	mc_stats.issued++;
	mc_stats.hypercalls += mc->mcidx;
	mc_stats.arg_total += mc->argidx;

	mc_stats.histo[mc->mcidx]++;
	for (i = 0; i < mc->mcidx; i++) {
		unsigned op = mc->entries[i].op;
		if (op < NHYPERCALLS)
			mc_stats.histo_hypercalls[op]++;
	}
}

static void mc_stats_flush(enum flush_reasons idx)
{
	check_zero();

	mc_stats.flush[idx]++;
}

#else  /* !CONFIG_XEN_DEBUG_FS */

static inline void mc_add_stats(const struct mc_buffer *mc)
{
}

static inline void mc_stats_flush(enum flush_reasons idx)
{
}

#endif	/* CONFIG_XEN_DEBUG_FS */
/* Issue all queued multicalls in a single hypercall, then run the
   queued callbacks. */
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	mc_add_stats(b);

	if (b->mcidx) {
#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			dump_stack();
			for (i = 0; i < b->mcidx; i++) {
				printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);

	WARN_ON(ret);
}
/* Reserve a multicall slot plus 'args' bytes of argument space,
   flushing the buffer first if either would overflow. */
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) >= MC_ARGS) {
		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
#if MC_DEBUG
	b->caller[b->mcidx] = __builtin_return_address(0);
#endif
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx >= MC_ARGS);
	return ret;
}
/* Try to extend the argument space of the most recently queued
   multicall, provided it is still pending and its op matches 'op';
   returns { NULL, NULL } if that isn't possible. */
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) >= MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx >= MC_ARGS);
	return ret;
}
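
/*
 * Sketch of a typical caller (modelled on xen_extend_mmu_update() in
 * arch/x86/xen/mmu.c; the names come from that caller, not this file):
 * try to append another mmu_update to the pending multicall, falling
 * back to a fresh entry if it can't be extended:
 *
 *	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 *	if (mcs.mc != NULL)
 *		mcs.mc->args[1]++;        one more op for the pending call
 *	else {
 *		mcs = __xen_mc_entry(sizeof(*u));
 *		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
 *	}
 *	u = mcs.args;
 *	*u = *update;
 */
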
/* Queue a callback to run after the next flush; flushes first if the
   callback array is already full. */
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		mc_stats_flush(FL_CALLBACKS);
		xen_mc_flush();
	}

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}
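
/*
 * Sketch of a typical caller (modelled on the pte pinning code in
 * arch/x86/xen/mmu.c, which defers a spin_unlock until the batch has
 * really been issued; names assumed from that caller):
 *
 *	static void xen_pte_unlock(void *v)
 *	{
 *		spinlock_t *ptl = v;
 *		spin_unlock(ptl);
 *	}
 *
 *	xen_mc_callback(xen_pte_unlock, ptl);
 */
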
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

static int __init xen_mc_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mc_debug = debugfs_create_dir("multicalls", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
	debugfs_create_u32("hypercalls", 0444, d_mc_debug,
			   &mc_stats.hypercalls);
	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
				     mc_stats.histo, MC_BATCH);
	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
				     mc_stats.histo_hypercalls, NHYPERCALLS);
	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
				     mc_stats.flush, FL_N_REASONS);

	return 0;
}
fs_initcall(xen_mc_debugfs);

#endif /* CONFIG_XEN_DEBUG_FS */