  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the UDP module.
  7. *
  8. * Version: @(#)udp.h 1.0.2 05/07/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. *
  13. * Fixes:
  14. * Alan Cox : Turned on udp checksums. I don't want to
  15. * chase 'memory corruption' bugs that aren't!
  16. *
  17. * This program is free software; you can redistribute it and/or
  18. * modify it under the terms of the GNU General Public License
  19. * as published by the Free Software Foundation; either version
  20. * 2 of the License, or (at your option) any later version.
  21. */
  22. #ifndef _UDP_H
  23. #define _UDP_H
  24. #include <linux/list.h>
  25. #include <linux/bug.h>
  26. #include <net/inet_sock.h>
  27. #include <net/sock.h>
  28. #include <net/snmp.h>
  29. #include <net/ip.h>
  30. #include <linux/ipv6.h>
  31. #include <linux/seq_file.h>
  32. #include <linux/poll.h>
  33. #include <linux/static_key.h>
/**
 * struct udp_skb_cb - UDP(-Lite) private variables
 *
 * @header: private variables used by IPv4/IPv6
 * @cscov: checksum coverage length (UDP-Lite only)
 * @partial_cov: if set indicates partial csum coverage
 *
 * Lives in skb->cb while a packet traverses the UDP(-Lite) code; access
 * it through the UDP_SKB_CB() accessor below.
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16	cscov;
	__u8	partial_cov;
};

/* View an skb's control buffer as the UDP private area. */
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
/**
 * struct udp_hslot - UDP hash slot
 *
 * @head: head of list of sockets
 * @count: number of sockets in 'head' list
 * @lock: spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));
/**
 * struct udp_table - UDP table
 *
 * @hash: hash table, sockets are hashed on (local port)
 * @hash2: hash table, sockets are hashed on (local port, local address)
 * @mask: number of slots in hash tables, minus 1
 * @log: log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
  78. extern struct udp_table udp_table;
  79. void udp_table_init(struct udp_table *, const char *);
  80. static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
  81. struct net *net, unsigned int num)
  82. {
  83. return &table->hash[udp_hashfn(net, num, table->mask)];
  84. }
  85. /*
  86. * For secondary hash, net_hash_mix() is performed before calling
  87. * udp_hashslot2(), this explains difference with udp_hashslot()
  88. */
  89. static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
  90. unsigned int hash)
  91. {
  92. return &table->hash2[hash & table->mask];
  93. }
  94. extern struct proto udp_prot;
  95. extern atomic_long_t udp_memory_allocated;
  96. /* sysctl variables for udp */
  97. extern long sysctl_udp_mem[3];
  98. extern int sysctl_udp_rmem_min;
  99. extern int sysctl_udp_wmem_min;
  100. struct sk_buff;
  101. /*
  102. * Generic checksumming routines for UDP(-Lite) v4 and v6
  103. */
  104. static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
  105. {
  106. return (UDP_SKB_CB(skb)->cscov == skb->len ?
  107. __skb_checksum_complete(skb) :
  108. __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
  109. }
  110. static inline int udp_lib_checksum_complete(struct sk_buff *skb)
  111. {
  112. return !skb_csum_unnecessary(skb) &&
  113. __udp_lib_checksum_complete(skb);
  114. }
/**
 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 * @sk: socket we are writing to
 * @skb: sk_buff containing the filled-in UDP header
 * (checksum field must be zeroed out)
 *
 * Starts from a partial sum over the UDP header itself, then folds in the
 * precomputed csum of every skb queued on @sk's write queue.  Note that
 * @skb is deliberately reused as the iteration cursor by skb_queue_walk().
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}
  130. static inline __wsum udp_csum(struct sk_buff *skb)
  131. {
  132. __wsum csum = csum_partial(skb_transport_header(skb),
  133. sizeof(struct udphdr), skb->csum);
  134. for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
  135. csum = csum_add(csum, skb->csum);
  136. }
  137. return csum;
  138. }
/* Finalize a UDPv4 checksum: fold the IPv4 pseudo-header (@saddr, @daddr,
 * IPPROTO_UDP, @len) into the partial sum @base accumulated over the data.
 */
static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
  144. void udp_set_csum(bool nocheck, struct sk_buff *skb,
  145. __be32 saddr, __be32 daddr, int len);
/* Pull the UDP header off the packet while keeping skb->csum consistent.
 *
 * When no checksum has been validated yet (CHECKSUM_NONE and not marked
 * valid), the UDP header bytes are folded into skb->csum first, so the
 * subsequent skb_pull_rcsum() leaves a csum covering only the payload.
 * The cached UDP-Lite coverage length shrinks by the header size as well.
 */
static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
  154. typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
  155. __be16 dport);
  156. struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
  157. struct udphdr *uh, struct sock *sk);
  158. int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
  159. struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
  160. netdev_features_t features);
  161. static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
  162. {
  163. struct udphdr *uh;
  164. unsigned int hlen, off;
  165. off = skb_gro_offset(skb);
  166. hlen = off + sizeof(*uh);
  167. uh = skb_gro_header_fast(skb, off);
  168. if (skb_gro_header_hard(skb, hlen))
  169. uh = skb_gro_header_slow(skb, hlen, off);
  170. return uh;
  171. }
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */

/* This generic hash hook must never run (UDP hashing is handled elsewhere,
 * presumably via udp_lib_get_port() below — confirm); trap with BUG().
 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;	/* not reached; satisfies the int return type */
}
  178. void udp_lib_unhash(struct sock *sk);
  179. void udp_lib_rehash(struct sock *sk, u16 new_hash);
/* No UDP-specific close work: defer entirely to the generic socket
 * release.  @timeout is accepted for interface compatibility but unused.
 */
static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
  184. int udp_lib_get_port(struct sock *sk, unsigned short snum,
  185. unsigned int hash2_nulladdr);
  186. u32 udp_flow_hashrnd(void);
  187. static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
  188. int min, int max, bool use_eth)
  189. {
  190. u32 hash;
  191. if (min >= max) {
  192. /* Use default range */
  193. inet_get_local_port_range(net, &min, &max);
  194. }
  195. hash = skb_get_hash(skb);
  196. if (unlikely(!hash)) {
  197. if (use_eth) {
  198. /* Can't find a normal hash, caller has indicated an
  199. * Ethernet packet so use that to compute a hash.
  200. */
  201. hash = jhash(skb->data, 2 * ETH_ALEN,
  202. (__force u32) skb->protocol);
  203. } else {
  204. /* Can't derive any sort of hash for the packet, set
  205. * to some consistent random value.
  206. */
  207. hash = udp_flow_hashrnd();
  208. }
  209. }
  210. /* Since this is being sent on the wire obfuscate hash a bit
  211. * to minimize possbility that any useful information to an
  212. * attacker is leaked. Only upper 16 bits are relevant in the
  213. * computation for 16 bit port value.
  214. */
  215. hash ^= hash << 16;
  216. return htons((((u64) hash * (max - min)) >> 32) + min);
  217. }
/* Receive-queue memory charged to the socket, minus the not-yet-applied
 * forward_deficit (read locklessly with READ_ONCE).
 */
static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
  222. /* net/ipv4/udp.c */
  223. void udp_destruct_sock(struct sock *sk);
  224. void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
  225. int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
  226. void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
  227. struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
  228. int noblock, int *peeked, int *off, int *err);
  229. static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
  230. int noblock, int *err)
  231. {
  232. int peeked, off = 0;
  233. return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
  234. }
  235. int udp_v4_early_demux(struct sk_buff *skb);
  236. bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
  237. int udp_get_port(struct sock *sk, unsigned short snum,
  238. int (*saddr_cmp)(const struct sock *,
  239. const struct sock *));
  240. void udp_err(struct sk_buff *, u32);
  241. int udp_abort(struct sock *sk, int err);
  242. int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
  243. int udp_push_pending_frames(struct sock *sk);
  244. void udp_flush_pending_frames(struct sock *sk);
  245. void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
  246. int udp_rcv(struct sk_buff *skb);
  247. int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
  248. int udp_init_sock(struct sock *sk);
  249. int __udp_disconnect(struct sock *sk, int flags);
  250. int udp_disconnect(struct sock *sk, int flags);
  251. unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
  252. struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
  253. netdev_features_t features,
  254. bool is_ipv6);
  255. int udp_lib_getsockopt(struct sock *sk, int level, int optname,
  256. char __user *optval, int __user *optlen);
  257. int udp_lib_setsockopt(struct sock *sk, int level, int optname,
  258. char __user *optval, unsigned int optlen,
  259. int (*push_pending_frames)(struct sock *));
  260. struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
  261. __be32 daddr, __be16 dport, int dif);
  262. struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
  263. __be32 daddr, __be16 dport, int dif, int sdif,
  264. struct udp_table *tbl, struct sk_buff *skb);
  265. struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
  266. __be16 sport, __be16 dport);
  267. struct sock *udp6_lib_lookup(struct net *net,
  268. const struct in6_addr *saddr, __be16 sport,
  269. const struct in6_addr *daddr, __be16 dport,
  270. int dif);
  271. struct sock *__udp6_lib_lookup(struct net *net,
  272. const struct in6_addr *saddr, __be16 sport,
  273. const struct in6_addr *daddr, __be16 dport,
  274. int dif, int sdif, struct udp_table *tbl,
  275. struct sk_buff *skb);
  276. struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
  277. __be16 sport, __be16 dport);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache miss on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has been
	 * already validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};
/* Overlay the scratch layout onto skb->dev_scratch in place. */
static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}
#if BITS_PER_LONG == 64
/* 64bit: serve len/csum/linearity from the scratch cache, avoiding cold
 * skb cache lines (see struct udp_dev_scratch above).
 */
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
/* 32bit: the scratch area has no room for these fields, so read the skb
 * directly.
 */
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif
  329. static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
  330. struct iov_iter *to)
  331. {
  332. int n;
  333. n = copy_to_iter(skb->data + off, len, to);
  334. if (n == len)
  335. return 0;
  336. iov_iter_revert(to, n);
  337. return -EFAULT;
  338. }
/*
 * SNMP statistics for UDP and UDP-Lite
 *
 * Each helper bumps the UDP-Lite MIB when the is_udplite flag is set,
 * otherwise the plain UDP MIB; the __-prefixed variants map to the
 * __SNMP_INC_STATS flavor.
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
#define __UDP_INC_STATS(net, field, is_udplite) 	      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)

/* IPv6 counterparts, selecting the in6 MIB tables. */
#define __UDP6_INC_STATS(net, field, is_udplite)	    do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
} while(0)
#define UDP6_INC_STATS(net, field, __lite)		    do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
} while(0)
/* Select the MIB matching a socket: v4 vs v6 (when IPv6 is enabled) and
 * UDP vs UDP-Lite.  With IPv6 compiled out only the v4 tables exist.
 */
#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				  sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
/* /proc */
int udp_seq_open(struct inode *inode, struct file *file);

/* Per address-family parameters for the /proc/net/udp* seq files. */
struct udp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	struct udp_table		*udp_table;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

/* Iterator state while walking a udp_table for seq_file output. */
struct udp_iter_state {
	struct seq_net_private  p;
	sa_family_t		family;
	int			bucket;
	struct udp_table	*udp_table;
};
  388. #ifdef CONFIG_PROC_FS
  389. int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
  390. void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
  391. int udp4_proc_init(void);
  392. void udp4_proc_exit(void);
  393. #endif
int udpv4_offload_init(void);

void udp_init(void);

/* NOTE(review): a 'static' definition in a header gives every translation
 * unit including this file its own private copy of the key, which looks
 * wrong for a flag toggled via udp_encap_enable() — confirm whether this
 * should instead be a declaration with the definition in net/ipv4/udp.c.
 */
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void);

#if IS_ENABLED(CONFIG_IPV6)
void udpv6_encap_enable(void);
#endif
  401. static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
  402. struct sk_buff *skb, bool ipv4)
  403. {
  404. struct sk_buff *segs;
  405. /* the GSO CB lays after the UDP one, no need to save and restore any
  406. * CB fragment
  407. */
  408. segs = __skb_gso_segment(skb, NETIF_F_SG, false);
  409. if (unlikely(IS_ERR_OR_NULL(segs))) {
  410. int segs_nr = skb_shinfo(skb)->gso_segs;
  411. atomic_add(segs_nr, &sk->sk_drops);
  412. SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
  413. kfree_skb(skb);
  414. return NULL;
  415. }
  416. consume_skb(skb);
  417. return segs;
  418. }
  419. #endif /* _UDP_H */