/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		"Ping" sockets
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Based on ipv4/udp.c code.
 *
 *		Authors:	Vasiliy Kulikov / Openwall (for Linux 2.6),
 *				Pavel Kankovsky (for Linux 2.4.32)
 *
 *		Pavel gave all rights to bugs to Vasiliy,
 *		none of the bugs are Pavel's now.
 *
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/ping.h>
#include <net/udp.h>
#include <net/route.h>
#include <net/inet_common.h>
#include <net/checksum.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#endif

struct ping_table ping_table;
struct pingv6_ops pingv6_ops;
EXPORT_SYMBOL_GPL(pingv6_ops);

static u16 ping_port_rover;

static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
{
	int res = (num + net_hash_mix(net)) & mask;

	pr_debug("hash(%d) = %d\n", num, res);
	return res;
}
EXPORT_SYMBOL_GPL(ping_hash);

static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
						     struct net *net, unsigned int num)
{
	return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
}

int ping_get_port(struct sock *sk, unsigned short ident)
{
	struct hlist_nulls_node *node;
	struct hlist_nulls_head *hlist;
	struct inet_sock *isk, *isk2;
	struct sock *sk2 = NULL;

	isk = inet_sk(sk);
	write_lock_bh(&ping_table.lock);
	if (ident == 0) {
		u32 i;
		u16 result = ping_port_rover + 1;

		for (i = 0; i < (1L << 16); i++, result++) {
			if (!result)
				result++; /* avoid zero */
			hlist = ping_hashslot(&ping_table, sock_net(sk),
					      result);
			ping_portaddr_for_each_entry(sk2, node, hlist) {
				isk2 = inet_sk(sk2);

				if (isk2->inet_num == result)
					goto next_port;
			}

			/* found */
			ping_port_rover = ident = result;
			break;
next_port:
			;
		}
		if (i >= (1L << 16))
			goto fail;
	} else {
		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
		ping_portaddr_for_each_entry(sk2, node, hlist) {
			isk2 = inet_sk(sk2);

			/* BUG? Why is this reuse and not reuseaddr? ping.c
			 * doesn't turn off SO_REUSEADDR, and it doesn't expect
			 * that other ping processes can steal its packets.
			 */
			if ((isk2->inet_num == ident) &&
			    (sk2 != sk) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}

	pr_debug("found port/ident = %d\n", ident);
	isk->inet_num = ident;
	if (sk_unhashed(sk)) {
		pr_debug("was not hashed\n");
		sock_hold(sk);
		hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	}
	write_unlock_bh(&ping_table.lock);
	return 0;

fail:
	write_unlock_bh(&ping_table.lock);
	return 1;
}
EXPORT_SYMBOL_GPL(ping_get_port);

void ping_hash(struct sock *sk)
{
	pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
	BUG(); /* "Please do not press this button again." */
}

void ping_unhash(struct sock *sk)
{
	struct inet_sock *isk = inet_sk(sk);

	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
	write_lock_bh(&ping_table.lock);
	if (sk_hashed(sk)) {
		hlist_nulls_del(&sk->sk_nulls_node);
		sk_nulls_node_init(&sk->sk_nulls_node);
		sock_put(sk);
		isk->inet_num = 0;
		isk->inet_sport = 0;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	}
	write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);

static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
{
	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
	struct sock *sk = NULL;
	struct inet_sock *isk;
	struct hlist_nulls_node *hnode;
	int dif = skb->dev->ifindex;

	if (skb->protocol == htons(ETH_P_IP)) {
		pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
			 (int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
			 (int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
	}

	read_lock_bh(&ping_table.lock);

	ping_portaddr_for_each_entry(sk, hnode, hslot) {
		isk = inet_sk(sk);
		pr_debug("iterate\n");
		if (isk->inet_num != ident)
			continue;

		if (skb->protocol == htons(ETH_P_IP) &&
		    sk->sk_family == AF_INET) {
			pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
				 (int) isk->inet_num, &isk->inet_rcv_saddr,
				 sk->sk_bound_dev_if);

			if (isk->inet_rcv_saddr &&
			    isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
				continue;
#if IS_ENABLED(CONFIG_IPV6)
		} else if (skb->protocol == htons(ETH_P_IPV6) &&
			   sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
				 (int) isk->inet_num,
				 &inet6_sk(sk)->rcv_saddr,
				 sk->sk_bound_dev_if);

			if (!ipv6_addr_any(&np->rcv_saddr) &&
			    !ipv6_addr_equal(&np->rcv_saddr,
					     &ipv6_hdr(skb)->daddr))
				continue;
#endif
		} else {
			continue;
		}

		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
			continue;

		sock_hold(sk);
		goto exit;
	}

	sk = NULL;
exit:
	read_unlock_bh(&ping_table.lock);

	return sk;
}

static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
					  gid_t *high)
{
	gid_t *data = net->ipv4.sysctl_ping_group_range;
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}

int ping_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	gid_t group = current_egid();
	gid_t range[2];
	struct group_info *group_info;
	int i, j, count;
	int ret = 0;
	kgid_t low, high;

	if (sk->sk_family == AF_INET6)
		inet6_sk(sk)->ipv6only = 1;

	inet_get_ping_group_range_net(net, range, range+1);
	low = make_kgid(&init_user_ns, range[0]);
	high = make_kgid(&init_user_ns, range[1]);
	if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
		return -EACCES;

	if (range[0] <= group && group <= range[1])
		return 0;

	group_info = get_current_groups();
	count = group_info->ngroups;
	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);

		for (j = 0; j < cp_count; j++) {
			kgid_t gid = group_info->blocks[i][j];

			/* A supplementary group inside the sysctl range grants
			 * access; jump out so group_info is released before
			 * returning success (ret is still 0 here).
			 */
			if (gid_lte(low, gid) && gid_lte(gid, high))
				goto out_release_group;
		}
		count -= cp_count;
	}

	ret = -EACCES;

out_release_group:
	put_group_info(group_info);
	return ret;
}
EXPORT_SYMBOL_GPL(ping_init_sock);

void ping_close(struct sock *sk, long timeout)
{
	pr_debug("ping_close(sk=%p,sk->num=%u)\n",
		 inet_sk(sk), inet_sk(sk)->inet_num);
	pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);

	sk_common_release(sk);
}
EXPORT_SYMBOL_GPL(ping_close);

/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
			 struct sockaddr *uaddr, int addr_len)
{
	struct net *net = sock_net(sk);

	if (sk->sk_family == AF_INET) {
		struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
		int chk_addr_ret;

		if (addr_len < sizeof(*addr))
			return -EINVAL;

		if (addr->sin_family != AF_INET &&
		    !(addr->sin_family == AF_UNSPEC &&
		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
			return -EAFNOSUPPORT;

		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));

		chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);

		if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
			chk_addr_ret = RTN_LOCAL;

		if ((sysctl_ip_nonlocal_bind == 0 &&
		     isk->freebind == 0 && isk->transparent == 0 &&
		     chk_addr_ret != RTN_LOCAL) ||
		    chk_addr_ret == RTN_MULTICAST ||
		    chk_addr_ret == RTN_BROADCAST)
			return -EADDRNOTAVAIL;

#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_family == AF_INET6) {
		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
		int addr_type, scoped, has_addr;
		struct net_device *dev = NULL;

		if (addr_len < sizeof(*addr))
			return -EINVAL;

		if (addr->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));

		addr_type = ipv6_addr_type(&addr->sin6_addr);
		scoped = __ipv6_addr_needs_scope_id(addr_type);
		if ((addr_type != IPV6_ADDR_ANY &&
		     !(addr_type & IPV6_ADDR_UNICAST)) ||
		    (scoped && !addr->sin6_scope_id))
			return -EINVAL;

		rcu_read_lock();
		if (addr->sin6_scope_id) {
			dev = dev_get_by_index_rcu(net, addr->sin6_scope_id);
			if (!dev) {
				rcu_read_unlock();
				return -ENODEV;
			}
		}
		has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
						    scoped);
		rcu_read_unlock();

		if (!(isk->freebind || isk->transparent || has_addr ||
		      addr_type == IPV6_ADDR_ANY))
			return -EADDRNOTAVAIL;

		if (scoped)
			sk->sk_bound_dev_if = addr->sin6_scope_id;
#endif
	} else {
		return -EAFNOSUPPORT;
	}

	return 0;
}

void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
{
	if (saddr->sa_family == AF_INET) {
		struct inet_sock *isk = inet_sk(sk);
		struct sockaddr_in *addr = (struct sockaddr_in *) saddr;

		isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (saddr->sa_family == AF_INET6) {
		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
		struct ipv6_pinfo *np = inet6_sk(sk);

		np->rcv_saddr = np->saddr = addr->sin6_addr;
#endif
	}
}

void ping_clear_saddr(struct sock *sk, int dif)
{
	sk->sk_bound_dev_if = dif;
	if (sk->sk_family == AF_INET) {
		struct inet_sock *isk = inet_sk(sk);

		isk->inet_rcv_saddr = isk->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
		memset(&np->saddr, 0, sizeof(np->saddr));
#endif
	}
}

/*
 * We need our own bind because there are no privileged id's == local ports.
 * Moreover, we don't allow binding to multi- and broadcast addresses.
 */
int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	unsigned short snum;
	int err;
	int dif = sk->sk_bound_dev_if;

	err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
	if (err)
		return err;

	lock_sock(sk);

	err = -EINVAL;
	if (isk->inet_num != 0)
		goto out;

	err = -EADDRINUSE;
	ping_set_saddr(sk, uaddr);
	snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
	if (ping_get_port(sk, snum) != 0) {
		ping_clear_saddr(sk, dif);
		goto out;
	}

	pr_debug("after bind(): num = %d, dif = %d\n",
		 (int)isk->inet_num,
		 (int)sk->sk_bound_dev_if);

	err = 0;
	if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
	    (sk->sk_family == AF_INET6 &&
	     !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;

	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	isk->inet_sport = htons(isk->inet_num);
	isk->inet_daddr = 0;
	isk->inet_dport = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
#endif

	sk_dst_reset(sk);
out:
	release_sock(sk);
	pr_debug("ping_v4_bind -> %d\n", err);
	return err;
}
EXPORT_SYMBOL_GPL(ping_bind);

/*
 * Is this a supported type of ICMP message?
 */
static inline int ping_supported(int family, int type, int code)
{
	return (family == AF_INET && type == ICMP_ECHO && code == 0) ||
	       (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.
 */
void ping_err(struct sk_buff *skb, int offset, u32 info)
{
	int family;
	struct icmphdr *icmph;
	struct inet_sock *inet_sock;
	int type;
	int code;
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	int harderr;
	int err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		offset = iph->ihl << 2;
		family = AF_INET;
		type = icmp_hdr(skb)->type;
		code = icmp_hdr(skb)->code;
		icmph = (struct icmphdr *)(skb->data + offset);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		family = AF_INET6;
		type = icmp6_hdr(skb)->icmp6_type;
		code = icmp6_hdr(skb)->icmp6_code;
		icmph = (struct icmphdr *) (skb->data + offset);
	} else {
		BUG();
	}

	/* We assume the packet has already been checked by icmp_unreach */

	if (!ping_supported(family, icmph->type, icmph->code))
		return;

	pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n",
		 skb->protocol, type, code, ntohs(icmph->un.echo.id),
		 ntohs(icmph->un.echo.sequence));

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
	if (sk == NULL) {
		pr_debug("no socket, dropping\n");
		return;	/* No socket for error */
	}
	pr_debug("err on socket %p\n", sk);

	err = 0;
	harderr = 0;
	inet_sock = inet_sk(sk);

	if (skb->protocol == htons(ETH_P_IP)) {
		switch (type) {
		default:
		case ICMP_TIME_EXCEEDED:
			err = EHOSTUNREACH;
			break;
		case ICMP_SOURCE_QUENCH:
			/* This is not a real error but ping wants to see it.
			 * Report it with some fake errno.
			 */
			err = EREMOTEIO;
			break;
		case ICMP_PARAMETERPROB:
			err = EPROTO;
			harderr = 1;
			break;
		case ICMP_DEST_UNREACH:
			if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
				ipv4_sk_update_pmtu(skb, sk, info);
				if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
					err = EMSGSIZE;
					harderr = 1;
					break;
				}
				goto out;
			}
			err = EHOSTUNREACH;
			if (code <= NR_ICMP_UNREACH) {
				harderr = icmp_err_convert[code].fatal;
				err = icmp_err_convert[code].errno;
			}
			break;
		case ICMP_REDIRECT:
			/* See ICMP_SOURCE_QUENCH */
			err = EREMOTEIO;
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
#endif
	}

	/*
	 * RFC1122: OK. Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if ((family == AF_INET && !inet_sock->recverr) ||
	    (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		if (family == AF_INET) {
			ip_icmp_error(sk, skb, err, 0 /* no remote port */,
				      info, (u8 *)icmph);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
						   info, (u8 *)icmph);
#endif
		}
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(ping_err);

void ping_v4_err(struct sk_buff *skb, u32 info)
{
	ping_err(skb, 0, info);
}

/*
 * Copy and checksum an ICMP Echo packet from user space into a buffer
 * starting from the payload.
 */
int ping_getfrag(void *from, char *to,
		 int offset, int fraglen, int odd, struct sk_buff *skb)
{
	struct pingfakehdr *pfh = (struct pingfakehdr *)from;

	if (offset == 0) {
		if (fraglen < sizeof(struct icmphdr))
			BUG();
		if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
			    pfh->iov, 0, fraglen - sizeof(struct icmphdr),
			    &pfh->wcheck))
			return -EFAULT;
	} else if (offset < sizeof(struct icmphdr)) {
		BUG();
	} else {
		if (csum_partial_copy_fromiovecend
				(to, pfh->iov, offset - sizeof(struct icmphdr),
				 fraglen, &pfh->wcheck))
			return -EFAULT;
	}

#if IS_ENABLED(CONFIG_IPV6)
	/* For IPv6, checksum each skb as we go along, as expected by
	 * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in
	 * wcheck, it will be finalized in ping_v4_push_pending_frames.
	 */
	if (pfh->family == AF_INET6) {
		skb->csum = pfh->wcheck;
		skb->ip_summed = CHECKSUM_NONE;
		pfh->wcheck = 0;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(ping_getfrag);

static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
				       struct flowi4 *fl4)
{
	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

	pfh->wcheck = csum_partial((char *)&pfh->icmph,
		sizeof(struct icmphdr), pfh->wcheck);
	pfh->icmph.checksum = csum_fold(pfh->wcheck);
	memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
	skb->ip_summed = CHECKSUM_NONE;
	return ip_push_pending_frames(sk, fl4);
}

int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
			void *user_icmph, size_t icmph_len)
{
	u8 type, code;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/* Must have at least a full ICMP header. */
	if (len < icmph_len)
		return -EINVAL;

	/*
	 *	Check the flags.
	 */

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Fetch the ICMP header provided by the userland.
	 *	iovec is modified! The ICMP header is consumed.
	 */
	if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
		return -EFAULT;

	if (family == AF_INET) {
		type = ((struct icmphdr *) user_icmph)->type;
		code = ((struct icmphdr *) user_icmph)->code;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (family == AF_INET6) {
		type = ((struct icmp6hdr *) user_icmph)->icmp6_type;
		code = ((struct icmp6hdr *) user_icmph)->icmp6_code;
#endif
	} else {
		BUG();
	}

	if (!ping_supported(family, type, code))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ping_common_sendmsg);

int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		    size_t len)
{
	struct net *net = sock_net(sk);
	struct flowi4 fl4;
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct icmphdr user_icmph;
	struct pingfakehdr pfh;
	struct rtable *rt = NULL;
	struct ip_options_data opt_copy;
	int free = 0;
	__be32 saddr, daddr, faddr;
	u8 tos;
	int err;

	pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);

	err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph,
				  sizeof(user_icmph));
	if (err)
		return err;

	/*
	 *	Get and verify the address.
	 */

	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;

		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET)
			return -EAFNOSUPPORT;
		daddr = usin->sin_addr.s_addr;
		/* no remote port */
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		/* no remote port */
	}

	ipc.addr = inet->inet_saddr;
	ipc.opt = NULL;
	ipc.oif = sk->sk_bound_dev_if;
	ipc.tx_flags = 0;

	sock_tx_timestamp(sk, &ipc.tx_flags);

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
			   sk->sk_uid);

	fl4.fl4_icmp_type = user_icmph.type;
	fl4.fl4_icmp_code = user_icmph.code;

	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		if (err == -ENETUNREACH)
			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
		goto out;
	}

	err = -EACCES;
	if ((rt->rt_flags & RTCF_BROADCAST) &&
	    !sock_flag(sk, SOCK_BROADCAST))
		goto out;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (!ipc.addr)
		ipc.addr = fl4.daddr;

	lock_sock(sk);

	pfh.icmph.type = user_icmph.type; /* already checked */
	pfh.icmph.code = user_icmph.code; /* ditto */
	pfh.icmph.checksum = 0;
	pfh.icmph.un.echo.id = inet->inet_sport;
	pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
	pfh.iov = msg->msg_iov;
	pfh.wcheck = 0;
	pfh.family = AF_INET;

	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
			     0, &ipc, &rt, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);
	else
		err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		icmp_out_count(sock_net(sk), user_icmph.type);
		return len;
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	int family = sk->sk_family;
	struct sk_buff *skb;
	int copied, err;

	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	if (flags & MSG_ERRQUEUE) {
		if (family == AF_INET) {
			return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			return pingv6_ops.ipv6_recv_error(sk, msg, len,
							  addr_len);
#endif
		}
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	/* Don't bother checking the checksum */
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address and add cmsg data. */
	if (family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = 0 /* skb->h.uh->source */;
			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			*addr_len = sizeof(*sin);
		}

		if (isk->cmsg_flags)
			ip_cmsg_recv(msg, skb);

#if IS_ENABLED(CONFIG_IPV6)
	} else if (family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		struct ipv6hdr *ip6 = ipv6_hdr(skb);
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)msg->msg_name;

		if (sin6) {
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = 0;
			sin6->sin6_addr = ip6->saddr;
			sin6->sin6_flowinfo = 0;
			if (np->sndflow)
				sin6->sin6_flowinfo = ip6_flowinfo(ip6);
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    IP6CB(skb)->iif);
			*addr_len = sizeof(*sin6);
		}

		if (inet6_sk(sk)->rxopt.all)
			pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
#endif
	} else {
		BUG();
	}

	err = copied;

done:
	skb_free_datagram(sk, skb);
out:
	pr_debug("ping_recvmsg -> %d\n", err);
	return err;
}
EXPORT_SYMBOL_GPL(ping_recvmsg);

int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
		 inet_sk(sk), inet_sk(sk)->inet_num, skb);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		pr_debug("ping_queue_rcv_skb -> failed\n");
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);

/*
 * All we need to do is get the socket.
 */
void ping_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct net *net = dev_net(skb->dev);
	struct icmphdr *icmph = icmp_hdr(skb);

	/* We assume the packet has already been checked by icmp_rcv */

	pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
		 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* Push ICMP header back */
	skb_push(skb, skb->data - (u8 *)icmph);

	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
	if (sk != NULL) {
		pr_debug("rcv on socket %p\n", sk);
		ping_queue_rcv_skb(sk, skb_get(skb));
		sock_put(sk);
		return;
	}
	pr_debug("no socket, dropping\n");

	/* We're called from icmp_rcv(). kfree_skb() is done there. */
}
EXPORT_SYMBOL_GPL(ping_rcv);

struct proto ping_prot = {
	.name		= "PING",
	.owner		= THIS_MODULE,
	.init		= ping_init_sock,
	.close		= ping_close,
	.connect	= ip4_datagram_connect,
	.disconnect	= udp_disconnect,
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.sendmsg	= ping_v4_sendmsg,
	.recvmsg	= ping_recvmsg,
	.bind		= ping_bind,
	.backlog_rcv	= ping_queue_rcv_skb,
	.hash		= ping_hash,
	.unhash		= ping_unhash,
	.get_port	= ping_get_port,
	.obj_size	= sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);

#ifdef CONFIG_PROC_FS

static struct sock *ping_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct hlist_nulls_head *hslot;

		hslot = &ping_table.hash[state->bucket];

		if (hlist_nulls_empty(hslot))
			continue;

		sk_nulls_for_each(sk, node, hslot) {
			if (net_eq(sock_net(sk), net))
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
{
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_nulls_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net)));

	if (!sk)
		return ping_get_first(seq, state->bucket + 1);
	return sk;
}

static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = ping_get_first(seq, 0);

	if (sk)
		while (pos && (sk = ping_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *ping_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ping_iter_state *state = seq->private;

	state->bucket = 0;

	read_lock_bh(&ping_table.lock);

	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = ping_get_idx(seq, 0);
	else
		sk = ping_get_next(seq, v);

	++*pos;
	return sk;
}

static void ping_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock_bh(&ping_table.lock);
}

static void ping_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

static int ping_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct ping_iter_state *state = seq->private;

		ping_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations ping_seq_ops = {
	.show		= ping_seq_show,
	.start		= ping_seq_start,
	.next		= ping_seq_next,
	.stop		= ping_seq_stop,
};

static int ping_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ping_seq_ops,
			    sizeof(struct ping_iter_state));
}

static const struct file_operations ping_seq_fops = {
	.open		= ping_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int ping_proc_register(struct net *net)
{
	struct proc_dir_entry *p;
	int rc = 0;

	p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

static void ping_proc_unregister(struct net *net)
{
	proc_net_remove(net, "icmp");
}

static int __net_init ping_proc_init_net(struct net *net)
{
	return ping_proc_register(net);
}

static void __net_exit ping_proc_exit_net(struct net *net)
{
	ping_proc_unregister(net);
}

static struct pernet_operations ping_net_ops = {
	.init = ping_proc_init_net,
	.exit = ping_proc_exit_net,
};

int __init ping_proc_init(void)
{
	return register_pernet_subsys(&ping_net_ops);
}

void ping_proc_exit(void)
{
	unregister_pernet_subsys(&ping_net_ops);
}

#endif

void __init ping_init(void)
{
	int i;

	for (i = 0; i < PING_HTABLE_SIZE; i++)
		INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
	rwlock_init(&ping_table.lock);
}