/* test_run.c */
  1. /* Copyright (c) 2017 Facebook
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of version 2 of the GNU General Public
  5. * License as published by the Free Software Foundation.
  6. */
  7. #include <linux/bpf.h>
  8. #include <linux/slab.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/etherdevice.h>
  11. #include <linux/filter.h>
  12. #include <linux/sched/signal.h>
/*
 * Run @prog exactly once against @ctx and return the program's 32-bit
 * return value.
 *
 * The program is executed with preemption disabled and inside an RCU
 * read-side critical section, which is the environment BPF_PROG_RUN
 * expects (the prog and the objects it references are RCU-protected —
 * NOTE(review): protection scheme inferred from the lock pairing here;
 * confirm against BPF_PROG_RUN's contract).
 */
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();
	return ret;
}
/*
 * Run @prog against @ctx @repeat times (a @repeat of 0 means once) and
 * store the average per-iteration duration, in nanoseconds, in *@time.
 *
 * Returns the return value of the last iteration executed.  When the
 * scheduler wants the CPU, the loop pauses the clock, voluntarily
 * reschedules, and resumes timing, so time spent sleeping is excluded
 * from the measurement.  A pending signal aborts the loop early so a
 * long-running test stays killable.
 */
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			/* Stop the clock across cond_resched() so the
			 * reschedule gap is not billed to the program.
			 */
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	/* do_div: 64-bit average works on 32-bit architectures too. */
	do_div(time_spent, repeat);
	/* The reported duration is a u32; clamp instead of truncating. */
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
	return ret;
}
  45. static int bpf_test_finish(const union bpf_attr *kattr,
  46. union bpf_attr __user *uattr, const void *data,
  47. u32 size, u32 retval, u32 duration)
  48. {
  49. void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
  50. int err = -EFAULT;
  51. if (data_out && copy_to_user(data_out, data, size))
  52. goto out;
  53. if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
  54. goto out;
  55. if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
  56. goto out;
  57. if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
  58. goto out;
  59. err = 0;
  60. out:
  61. return err;
  62. }
  63. static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
  64. u32 headroom, u32 tailroom)
  65. {
  66. void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
  67. void *data;
  68. if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
  69. return ERR_PTR(-EINVAL);
  70. data = kzalloc(size + headroom + tailroom, GFP_USER);
  71. if (!data)
  72. return ERR_PTR(-ENOMEM);
  73. if (copy_from_user(data + headroom, data_in, size)) {
  74. kfree(data);
  75. return ERR_PTR(-EFAULT);
  76. }
  77. return data;
  78. }
/*
 * BPF_PROG_TEST_RUN handler for skb-based program types.
 *
 * Builds a linear skb around the user-supplied packet, runs @prog
 * kattr->test.repeat times via bpf_test_run(), and copies the resulting
 * packet, return value and average duration back through @uattr.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	struct sk_buff *skb;
	void *data;
	int ret;

	/* Reserve the headroom/tailroom a real receive path would have. */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Classify the program type: L2 (TC) programs see the MAC
	 * header; these and the LWT types get direct packet access.
	 */
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* build_skb() takes ownership of data on success; on failure we
	 * must free it ourselves.
	 */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* eth_type_trans() sets skb->protocol and pulls the Ethernet
	 * header, leaving skb->data at the network header.
	 */
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		/* L2 programs expect skb->data at the MAC header. */
		__skb_push(skb, ETH_HLEN);
	if (is_direct_pkt_access)
		bpf_compute_data_end(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2)
		/* Push the MAC header back so the copy-out includes it. */
		__skb_push(skb, ETH_HLEN);
	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}
/*
 * BPF_PROG_TEST_RUN handler for XDP programs.
 *
 * Lays the user packet out with XDP_PACKET_HEADROOM + NET_IP_ALIGN of
 * headroom in front of it (so the program can adjust the head), runs
 * @prog, and copies the possibly-resized packet, return value and
 * average duration back through @uattr.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_end = xdp.data + size;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	/* If the program moved xdp.data (head adjustment), report the
	 * packet's new length instead of the input size.
	 */
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}