/* net/core/sock_diag.c — NETLINK_SOCK_DIAG core: handler registry and dispatch. */
  1. /* License: GPL */
  2. #include <linux/mutex.h>
  3. #include <linux/socket.h>
  4. #include <linux/skbuff.h>
  5. #include <net/netlink.h>
  6. #include <net/net_namespace.h>
  7. #include <linux/module.h>
  8. #include <net/sock.h>
  9. #include <linux/kernel.h>
  10. #include <linux/tcp.h>
  11. #include <linux/workqueue.h>
  12. #include <linux/inet_diag.h>
  13. #include <linux/sock_diag.h>
/* Per-family diag handlers; slots are written only under sock_diag_table_mutex. */
static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
/* Legacy TCPDIAG/DCCPDIAG compat receive hook (registered by inet diag code). */
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
/* Protects the handler table and the compat hook, for both update and lookup. */
static DEFINE_MUTEX(sock_diag_table_mutex);
/* Workqueue on which destroy-notification broadcasts are built and sent. */
static struct workqueue_struct *broadcast_wq;
/*
 * Return the socket's unique 64-bit cookie, lazily allocating it on first
 * use.  The cmpxchg may lose a race against another CPU assigning the
 * cookie concurrently; the loop then re-reads and returns the winner's
 * value.  A cookie is never 0 once assigned, so the loop terminates.
 */
static u64 sock_gen_cookie(struct sock *sk)
{
	while (1) {
		u64 res = atomic64_read(&sk->sk_cookie);

		if (res)
			return res;
		/* Draw a fresh per-netns value and try to install it. */
		res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
		atomic64_cmpxchg(&sk->sk_cookie, 0, res);
	}
}
  28. int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
  29. {
  30. u64 res;
  31. if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
  32. return 0;
  33. res = sock_gen_cookie(sk);
  34. if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
  35. return -ESTALE;
  36. return 0;
  37. }
  38. EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
  39. void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
  40. {
  41. u64 res = sock_gen_cookie(sk);
  42. cookie[0] = (u32)res;
  43. cookie[1] = (u32)(res >> 32);
  44. }
  45. EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
/*
 * Emit the SK_MEMINFO_* array for @sk as netlink attribute @attrtype.
 * The index layout is UAPI (linux/sock_diag.h), so the assignments below
 * must keep their indices.  Returns 0, or the error from nla_put()
 * (-EMSGSIZE when the skb is full).
 */
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
	u32 mem[SK_MEMINFO_VARS];

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

	return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
/*
 * Attach the socket's classic BPF filter program as netlink attribute
 * @attrtype.  When the requester is not allowed to see the filter, a
 * zero-length attribute is emitted instead so the dump layout stays
 * stable.  Returns 0 on success (including "no filter attached"), or
 * -EMSGSIZE when the skb has no room for the program.
 */
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	struct nlattr *attr;
	unsigned int flen;
	int err = 0;

	if (!may_report_filterinfo) {
		/* Zero-length placeholder; reservation failure is ignored here. */
		nla_reserve(skb, attrtype, 0);
		return 0;
	}

	/* sk_filter may be replaced concurrently — pin it with RCU. */
	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (!filter)
		goto out;

	/* Only filters loaded as classic BPF retain their original program. */
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	flen = bpf_classic_proglen(fprog);

	attr = nla_reserve(skb, attrtype, flen);
	if (attr == NULL) {
		err = -EMSGSIZE;
		goto out;
	}

	/* Copy while still inside the RCU read side: fprog must stay alive. */
	memcpy(nla_data(attr), fprog->filter, flen);
out:
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(sock_diag_put_filterinfo);
/* Carries a dying socket over to the workqueue for destroy broadcasting. */
struct broadcast_sk {
	struct sock *sk;		/* socket whose destruction is being reported */
	struct work_struct work;	/* runs sock_diag_broadcast_destroy_work() */
};
/*
 * Worst-case payload size for a destroy-broadcast message: the inet diag
 * header plus the attributes a handler's get_info() may append.
 */
static size_t sock_diag_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
	       + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
	       + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}
/*
 * Deferred half of sock_diag_broadcast_destroy(): build a diag message
 * for the dying socket and multicast it to the socket's SKNLGRP_* destroy
 * group, then finish destruction.  Runs in process context, so GFP_KERNEL
 * allocation and taking sock_diag_table_mutex are safe here.
 */
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
	struct broadcast_sk *bsk =
		container_of(work, struct broadcast_sk, work);
	struct sock *sk = bsk->sk;
	const struct sock_diag_handler *hndl;
	struct sk_buff *skb;
	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
	int err = -1;	/* assume failure until get_info() succeeds */

	WARN_ON(group == SKNLGRP_NONE);

	skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out;

	/* Look up the family's handler under the table mutex and fill skb. */
	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[sk->sk_family];
	if (hndl && hndl->get_info)
		err = hndl->get_info(skb, sk);
	mutex_unlock(&sock_diag_table_mutex);

	if (!err)
		/* nlmsg_multicast() consumes skb on both success and failure. */
		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
				GFP_KERNEL);
	else
		kfree_skb(skb);
out:
	/* Complete the destruction that the caller deferred to us. */
	sk_destruct(sk);
	kfree(bsk);
}
/*
 * Broadcast a destroy notification for @sk.  Takes over the final
 * sk_destruct() call: either the work item performs it after sending the
 * notification, or — when the GFP_ATOMIC allocation fails — the socket
 * is destructed immediately and the notification is silently dropped.
 */
void sock_diag_broadcast_destroy(struct sock *sk)
{
	/* Note, this function is often called from an interrupt context. */
	struct broadcast_sk *bsk =
		kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);

	if (!bsk)
		return sk_destruct(sk);
	bsk->sk = sk;
	INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
	queue_work(broadcast_wq, &bsk->work);
}
/*
 * Install the legacy TCPDIAG/DCCPDIAG receive hook.  Publication is
 * serialized by sock_diag_table_mutex, which the reader in
 * sock_diag_rcv_msg() also holds while invoking the hook.
 */
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = fn;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
/*
 * Remove the legacy inet compat hook.  @fn is deliberately unused here:
 * the hook is cleared unconditionally, which presumes a single registrant
 * — NOTE(review): confirm no second caller ever registers concurrently.
 */
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
  154. int sock_diag_register(const struct sock_diag_handler *hndl)
  155. {
  156. int err = 0;
  157. if (hndl->family >= AF_MAX)
  158. return -EINVAL;
  159. mutex_lock(&sock_diag_table_mutex);
  160. if (sock_diag_handlers[hndl->family])
  161. err = -EBUSY;
  162. else
  163. sock_diag_handlers[hndl->family] = hndl;
  164. mutex_unlock(&sock_diag_table_mutex);
  165. return err;
  166. }
  167. EXPORT_SYMBOL_GPL(sock_diag_register);
/*
 * Remove a per-family diag handler.  The BUG_ON enforces the contract
 * that only the currently registered handler may clear its own slot.
 */
void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
	int family = hnld->family;

	if (family >= AF_MAX)
		return;

	mutex_lock(&sock_diag_table_mutex);
	BUG_ON(sock_diag_handlers[family] != hnld);
	sock_diag_handlers[family] = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
/*
 * Common handler for SOCK_DIAG_BY_FAMILY and SOCK_DESTROY requests:
 * validate the request header, look up the per-family handler and
 * dispatch.  Returns a negative errno on failure.
 */
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int err;
	struct sock_diag_req *req = nlmsg_data(nlh);
	const struct sock_diag_handler *hndl;

	if (nlmsg_len(nlh) < sizeof(*req))
		return -EINVAL;

	if (req->sdiag_family >= AF_MAX)
		return -EINVAL;

	/*
	 * Autoload the family's diag module *before* taking the mutex:
	 * module init registers its handler, which needs the mutex itself.
	 */
	if (sock_diag_handlers[req->sdiag_family] == NULL)
		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				NETLINK_SOCK_DIAG, req->sdiag_family);

	mutex_lock(&sock_diag_table_mutex);
	hndl = sock_diag_handlers[req->sdiag_family];
	if (hndl == NULL)
		err = -ENOENT;
	else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
		err = hndl->dump(skb, nlh);
	else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
		err = hndl->destroy(skb, nlh);
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}
/*
 * Demultiplex one incoming netlink message.  Legacy TCPDIAG/DCCPDIAG
 * requests are forwarded to the inet compat hook (autoloading its module
 * first if needed); the modern commands go through __sock_diag_cmd().
 */
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int ret;

	switch (nlh->nlmsg_type) {
	case TCPDIAG_GETSOCK:
	case DCCPDIAG_GETSOCK:
		/* Autoload outside the mutex; see comment in __sock_diag_cmd(). */
		if (inet_rcv_compat == NULL)
			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
					NETLINK_SOCK_DIAG, AF_INET);

		/* Hold the mutex across the call so the hook can't vanish. */
		mutex_lock(&sock_diag_table_mutex);
		if (inet_rcv_compat != NULL)
			ret = inet_rcv_compat(skb, nlh);
		else
			ret = -EOPNOTSUPP;
		mutex_unlock(&sock_diag_table_mutex);

		return ret;
	case SOCK_DIAG_BY_FAMILY:
	case SOCK_DESTROY:
		return __sock_diag_cmd(skb, nlh);
	default:
		return -EINVAL;
	}
}
/* Serializes processing of whole request skbs on the diag socket. */
static DEFINE_MUTEX(sock_diag_mutex);

/* Netlink input callback: handle each message in the skb in turn. */
static void sock_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&sock_diag_mutex);
	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
	mutex_unlock(&sock_diag_mutex);
}
/*
 * Called when a listener subscribes to one of the destroy multicast
 * groups: try to autoload the matching inet(6) diag module so destroy
 * events can actually be generated.  Always returns 0.
 */
static int sock_diag_bind(struct net *net, int group)
{
	switch (group) {
	case SKNLGRP_INET_TCP_DESTROY:
	case SKNLGRP_INET_UDP_DESTROY:
		/* NOTE(review): unlocked peek at the handler table — it only
		 * gates the request_module() hint, so a race looks benign.
		 */
		if (!sock_diag_handlers[AF_INET])
			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
					NETLINK_SOCK_DIAG, AF_INET);
		break;
	case SKNLGRP_INET6_TCP_DESTROY:
	case SKNLGRP_INET6_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET6])
			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
					NETLINK_SOCK_DIAG, AF_INET6);
		break;
	}
	return 0;
}
/*
 * Administratively destroy @sk on behalf of a SOCK_DESTROY request.
 * Requires CAP_NET_ADMIN in the socket's user namespace.  @err is passed
 * through to the protocol's diag_destroy method — presumably the errno
 * reported to the socket owner; confirm against the protocol handlers.
 * Returns -EPERM, -EOPNOTSUPP, or the handler's result.
 */
int sock_diag_destroy(struct sock *sk, int err)
{
	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!sk->sk_prot->diag_destroy)
		return -EOPNOTSUPP;

	return sk->sk_prot->diag_destroy(sk, err);
}
EXPORT_SYMBOL_GPL(sock_diag_destroy);
/*
 * Per-netns init: create the NETLINK_SOCK_DIAG kernel socket.
 * NL_CFG_F_NONROOT_RECV lets unprivileged users issue diag queries;
 * destructive commands still check capabilities (see sock_diag_destroy).
 */
static int __net_init diag_net_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= SKNLGRP_MAX,
		.input	= sock_diag_rcv,
		.bind	= sock_diag_bind,
		.flags	= NL_CFG_F_NONROOT_RECV,
	};

	net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
	return net->diag_nlsk == NULL ? -ENOMEM : 0;
}
/* Per-netns teardown: release the diag socket and clear the pointer. */
static void __net_exit diag_net_exit(struct net *net)
{
	netlink_kernel_release(net->diag_nlsk);
	net->diag_nlsk = NULL;
}
/* Pernet hooks: one diag netlink socket per network namespace. */
static struct pernet_operations diag_net_ops = {
	.init = diag_net_init,
	.exit = diag_net_exit,
};
/*
 * Subsystem init: create the broadcast workqueue, then register the
 * per-netns operations.  BUG_ON on workqueue failure: without it the
 * destroy-broadcast path cannot function at all.
 */
static int __init sock_diag_init(void)
{
	broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
	BUG_ON(!broadcast_wq);
	return register_pernet_subsys(&diag_net_ops);
}
device_initcall(sock_diag_init);