/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"
/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
/* Registration cannot fail: there is only a single queueing backend in
 * the kernel (nfnetlink_queue), so we merely WARN if a handler is
 * somehow already registered. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
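
/*
 * A minimal sketch of how a queueing backend hooks in, assuming a
 * hypothetical pernet init function example_nfqueue_init() (the real
 * backend is nfnetlink_queue; names here are illustrative only):
 *
 *	static const struct nf_queue_handler example_qh = {
 *		.outfn		= example_outfn,	// hand the entry to userspace
 *		.nf_hook_drop	= example_hook_drop,	// flush entries for a dying hook
 *	};
 *
 *	static int __net_init example_nfqueue_init(struct net *net)
 *	{
 *		nf_register_queue_handler(net, &example_qh);
 *		return 0;
 *	}
 */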
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
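
/* Invoked from the hook-unregistration path so the queueing backend can
 * drop any packets it still has queued against the hook being removed
 * (the backend's ->nf_hook_drop does the actual flushing). */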
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, entry);
        rcu_read_unlock();
}
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };

        nf_queue_entry_get_refs(entry);
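        /* Pin the route: skb_dst_force() makes the dst refcounted and
         * afinfo->saveroute() stashes the routing key so nf_reinject()
         * can re-route the packet on NF_ACCEPT. */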
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}
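
/*
 * A hook requests queueing by encoding the target queue number in the
 * upper bits of its verdict; nf_queue() below recovers it with
 * "verdict >> NF_VERDICT_QBITS".  A sketch of what such a hook might
 * return (example_hook is hypothetical):
 *
 *	static unsigned int example_hook(void *priv, struct sk_buff *skb,
 *					 const struct nf_hook_state *state)
 *	{
 *		// queue to queue 3; continue traversal if no listener
 *		return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *	}
 */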
/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             struct nf_hook_entry **entryp, unsigned int verdict)
{
        struct nf_hook_entry *entry = *entryp;
        int ret;

        RCU_INIT_POINTER(state->hook_entries, entry);
        ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
                        *entryp = rcu_dereference(entry->next);
                        return 1;
                }
                kfree_skb(skb);
        }

        return 0;
}
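
/*
 * Completion path: the backend hands the entry back together with the
 * verdict it received from userspace.  NF_REPEAT re-runs the hook that
 * queued the packet, NF_ACCEPT re-routes the skb and resumes iteration
 * at the next hook, and anything else ends the traversal (deliver,
 * steal or drop).  The entry itself is always freed here.
 */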
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct nf_hook_entry *hook_entry;
        struct sk_buff *skb = entry->skb;
        const struct nf_afinfo *afinfo;
        struct nf_hook_ops *elem;
        int err;

        hook_entry = rcu_dereference(entry->state.hook_entries);
        elem = &hook_entry->ops;

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = elem->hook(elem->priv, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
                        verdict = NF_DROP;
        }

        entry->state.thresh = INT_MIN;

        if (verdict == NF_ACCEPT) {
                hook_entry = rcu_dereference(hook_entry->next);
                if (hook_entry)
next_hook:
                        verdict = nf_iterate(skb, &entry->state, &hook_entry);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
okfn:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, &hook_entry, verdict);
                if (err == 1) {
                        if (hook_entry)
                                goto next_hook;
                        goto okfn;
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);