xfrm4_policy.c 7.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332
  1. /*
  2. * xfrm4_policy.c
  3. *
  4. * Changes:
  5. * Kazunori MIYAZAWA @USAGI
  6. * YOSHIFUJI Hideaki @USAGI
  7. * Split up af-specific portion
  8. *
  9. */
  10. #include <linux/err.h>
  11. #include <linux/kernel.h>
  12. #include <linux/inetdevice.h>
  13. #include <linux/if_tunnel.h>
  14. #include <net/dst.h>
  15. #include <net/xfrm.h>
  16. #include <net/ip.h>
  17. static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
  18. static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
  19. int tos,
  20. const xfrm_address_t *saddr,
  21. const xfrm_address_t *daddr)
  22. {
  23. struct rtable *rt;
  24. memset(fl4, 0, sizeof(*fl4));
  25. fl4->daddr = daddr->a4;
  26. fl4->flowi4_tos = tos;
  27. if (saddr)
  28. fl4->saddr = saddr->a4;
  29. rt = __ip_route_output_key(net, fl4);
  30. if (!IS_ERR(rt))
  31. return &rt->dst;
  32. return ERR_CAST(rt);
  33. }
  34. static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
  35. const xfrm_address_t *saddr,
  36. const xfrm_address_t *daddr)
  37. {
  38. struct flowi4 fl4;
  39. return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
  40. }
  41. static int xfrm4_get_saddr(struct net *net,
  42. xfrm_address_t *saddr, xfrm_address_t *daddr)
  43. {
  44. struct dst_entry *dst;
  45. struct flowi4 fl4;
  46. dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
  47. if (IS_ERR(dst))
  48. return -EHOSTUNREACH;
  49. saddr->a4 = fl4.saddr;
  50. dst_release(dst);
  51. return 0;
  52. }
  53. static int xfrm4_get_tos(const struct flowi *fl)
  54. {
  55. return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
  56. }
/*
 * afinfo ->init_path hook: IPv4 needs no per-path setup and reserves no
 * extra header room, so this always succeeds.
 */
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
/*
 * afinfo ->fill_dst hook: populate the IPv4 rtable embedded in @xdst from
 * the flow key and from the route the bundle sits on top of (xdst->route).
 * Takes a reference on @dev and on the route's inet_peer; both are dropped
 * in xfrm4_dst_destroy()/xfrm4_dst_ifdown().
 */
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  const struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;
	const struct flowi4 *fl4 = &fl->u.ip4;

	/* Routing-cache key fields come from the flow the bundle matched. */
	xdst->u.rt.rt_key_dst = fl4->daddr;
	xdst->u.rt.rt_key_src = fl4->saddr;
	xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
	xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
	xdst->u.rt.rt_iif = fl4->flowi4_iif;
	xdst->u.rt.rt_oif = fl4->flowi4_oif;
	xdst->u.rt.rt_mark = fl4->flowi4_mark;
	xdst->u.dst.dev = dev;
	dev_hold(dev);
	/* Share the underlying route's inet_peer, taking our own ref. */
	xdst->u.rt.peer = rt->peer;
	if (rt->peer)
		atomic_inc(&rt->peer->refcnt);
	/* Sheit... I remember I did this right. Apparently,
	 * it was magically lost, so this code needs audit */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	/* Result fields are inherited from the underlying route. */
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_src = rt->rt_src;
	xdst->u.rt.rt_dst = rt->rt_dst;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;
	return 0;
}
/*
 * afinfo ->decode_session hook: build a flowi4 policy-lookup key from the
 * headers of @skb.  When @reverse is set, source and destination (and the
 * port pair) are swapped so the key describes the return direction.
 *
 * For non-fragmented packets the L4 selector fields (ports, ICMP type/
 * code, SPI, GRE key) are extracted per protocol.  Each case re-reads
 * skb_network_header() after pskb_may_pull() because a pull may
 * reallocate the header and move it, invalidating the old xprth pointer.
 */
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl;
	/* Start of the transport header: IP header length is in 32-bit words. */
	u8 *xprth = skb_network_header(skb) + ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;
	/* L4 data is only meaningful in the first fragment. */
	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			/* Need the 4 bytes holding source + dest port. */
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;
				xprth = skb_network_header(skb) + ihl * 4;
				ports = (__be16 *)xprth;
				/* Swap the port pair for reverse lookups. */
				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			/* Need type (byte 0) and code (byte 1). */
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;
				xprth = skb_network_header(skb) + ihl * 4;
				icmp = xprth;
				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			/* ESP SPI is the first 32-bit word of the header. */
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;
				xprth = skb_network_header(skb) + ihl * 4;
				ehdr = (__be32 *)xprth;
				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			/* AH SPI is the second 32-bit word of the header. */
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;
				xprth = skb_network_header(skb) + ihl * 4;
				ah_hdr = (__be32 *)xprth;
				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			/* IPComp CPI is 16 bits; widen it into the SPI slot. */
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;
				xprth = skb_network_header(skb) + ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;
				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;
				xprth = skb_network_header(skb) + ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;
				/* Key field follows the checksum field if
				 * GRE_CSUM is present. */
				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
}
  177. static inline int xfrm4_garbage_collect(struct dst_ops *ops)
  178. {
  179. struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
  180. xfrm4_policy_afinfo.garbage_collect(net);
  181. return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
  182. }
  183. static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
  184. {
  185. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  186. struct dst_entry *path = xdst->route;
  187. path->ops->update_pmtu(path, mtu);
  188. }
  189. static void xfrm4_dst_destroy(struct dst_entry *dst)
  190. {
  191. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  192. dst_destroy_metrics_generic(dst);
  193. if (likely(xdst->u.rt.peer))
  194. inet_putpeer(xdst->u.rt.peer);
  195. xfrm_dst_destroy(xdst);
  196. }
  197. static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
  198. int unregister)
  199. {
  200. if (!unregister)
  201. return;
  202. xfrm_dst_ifdown(dst, dev);
  203. }
/* dst_ops template for IPv4 xfrm bundles; gc_thresh is re-tuned in
 * xfrm4_init() and exported via sysctl below. */
static struct dst_ops xfrm4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		1024,
};
/* Per-family policy hooks registered with the xfrm core for AF_INET. */
static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = 		AF_INET,
	.dst_ops =		&xfrm4_dst_ops,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
	.blackhole_route =	ipv4_blackhole_route,
};
#ifdef CONFIG_SYSCTL
/* net/ipv4/xfrm4_gc_thresh: lets the admin tune the bundle gc threshold
 * at runtime (init_net only). */
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname       = "xfrm4_gc_thresh",
		.data           = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{ }
};
/* Handle for unregistering the table at module exit. */
static struct ctl_table_header *sysctl_hdr;
#endif
/* Register the IPv4 hooks with the xfrm policy core. */
static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}
/* Undo xfrm4_init(): drop the sysctl table (if registered) and the
 * afinfo registration. */
static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
	if (sysctl_hdr)
		unregister_net_sysctl_table(sysctl_hdr);
#endif
	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
/*
 * Boot-time initialisation of IPv4 xfrm: size the gc threshold from the
 * routing table, set up dst entry accounting, and register the state and
 * policy layers plus the sysctl knob.
 */
void __init xfrm4_init(int rt_max_size)
{
	/*
	 * Select a default value for the gc_thresh based on the main route
	 * table hash size.  It seems to me the worst case scenario is when
	 * we have ipsec operating in transport mode, in which we create a
	 * dst_entry per socket.  The xfrm gc algorithm starts trying to remove
	 * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
	 * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
	 * That will let us store an ipsec connection per route table entry,
	 * and start cleaning when we're 1/2 full
	 */
	xfrm4_dst_ops.gc_thresh = rt_max_size/2;
	dst_entries_init(&xfrm4_dst_ops);

	xfrm4_state_init();
	xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
					 xfrm4_policy_table);
#endif
}