ipv6.h

/*
 * Linux INET6 implementation
 *
 * Authors:
 *      Pedro Roque <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>

#define SIN6_LEN_RFC2133        24

#define IPV6_MAXPLEN            65535

/*
 *      NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP             0       /* Hop-by-hop option header. */
#define NEXTHDR_TCP             6       /* TCP segment. */
#define NEXTHDR_UDP             17      /* UDP message. */
#define NEXTHDR_IPV6            41      /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING         43      /* Routing header. */
#define NEXTHDR_FRAGMENT        44      /* Fragmentation/reassembly header. */
#define NEXTHDR_GRE             47      /* GRE header. */
#define NEXTHDR_ESP             50      /* Encapsulating security payload. */
#define NEXTHDR_AUTH            51      /* Authentication header. */
#define NEXTHDR_ICMP            58      /* ICMP for IPv6. */
#define NEXTHDR_NONE            59      /* No next header */
#define NEXTHDR_DEST            60      /* Destination options header. */
#define NEXTHDR_SCTP            132     /* SCTP message. */
#define NEXTHDR_MOBILITY        135     /* Mobility header. */

#define NEXTHDR_MAX             255

#define IPV6_DEFAULT_HOPLIMIT   64
#define IPV6_DEFAULT_MCASTHOPS  1

/*
 *      Addr type
 *
 *      type    -       unicast | multicast
 *      scope   -       local   | site    | global
 *      v4      -       compat
 *      v4mapped
 *      any
 *      loopback
 */

#define IPV6_ADDR_ANY           0x0000U
#define IPV6_ADDR_UNICAST       0x0001U
#define IPV6_ADDR_MULTICAST     0x0002U
#define IPV6_ADDR_LOOPBACK      0x0010U
#define IPV6_ADDR_LINKLOCAL     0x0020U
#define IPV6_ADDR_SITELOCAL     0x0040U
#define IPV6_ADDR_COMPATv4      0x0080U
#define IPV6_ADDR_SCOPE_MASK    0x00f0U
#define IPV6_ADDR_MAPPED        0x1000U

/*
 *      Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a)   \
        ((a)->s6_addr[1] & 0x0f)        /* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID       -1
#define IPV6_ADDR_SCOPE_NODELOCAL       0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL       0x02
#define IPV6_ADDR_SCOPE_SITELOCAL       0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL        0x08
#define IPV6_ADDR_SCOPE_GLOBAL          0x0e

/*
 *      Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a)  \
        ((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a)     \
        ((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
        ((a)->s6_addr[1] & 0x40)

/*
 *      fragmentation header
 */
struct frag_hdr {
        __u8 nexthdr;
        __u8 reserved;
        __be16 frag_off;
        __be32 identification;
};

#define IP6_MF          0x0001
#define IP6_OFFSET      0xFFF8

#define IP6_REPLY_MARK(net, mark) \
        ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)

#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;

#define _DEVINC(net, statname, mod, idev, field)                        \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
        mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, mod, idev, field)                  \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)               \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
        SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})

#define _DEVADD(net, statname, mod, idev, field, val)                   \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
        mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})

#define _DEVUPD(net, statname, mod, idev, field, val)                   \
({                                                                      \
        struct inet6_dev *_idev = (idev);                               \
        if (likely(_idev != NULL))                                      \
                mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
        mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})

/* MIBs */

#define IP6_INC_STATS(net, idev,field)          \
                _DEVINC(net, ipv6, , idev, field)
#define __IP6_INC_STATS(net, idev,field)        \
                _DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev,field,val)      \
                _DEVADD(net, ipv6, , idev, field, val)
#define __IP6_ADD_STATS(net, idev,field,val)    \
                _DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val)   \
                _DEVUPD(net, ipv6, , idev, field, val)
#define __IP6_UPD_PO_STATS(net, idev,field,val) \
                _DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field)       \
                _DEVINCATOMIC(net, icmpv6, , idev, field)
#define __ICMP6_INC_STATS(net, idev, field)     \
                _DEVINCATOMIC(net, icmpv6, __, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGIN_INC_STATS(net, idev, field)  \
        _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
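
/*
 * Example (illustrative sketch, not a declaration from this header): the
 * MIB helpers above bump a per-net and, when available, a per-device
 * counter in one statement. A typical datapath call might look like:
 *
 *      __IP6_INC_STATS(dev_net(skb->dev), ip6_dst_idev(skb_dst(skb)),
 *                      IPSTATS_MIB_INDISCARDS);
 *
 * The "__" variants use the non-preemption-safe SNMP accessors and are
 * meant for contexts where the caller already runs with BHs disabled.
 */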

struct ip6_ra_chain {
        struct ip6_ra_chain *next;
        struct sock *sk;
        int sel;
        void (*destructor)(struct sock *);
};

extern struct ip6_ra_chain *ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing ancillary
 * data, and is then passed on to IPv6.
 */
struct ipv6_txoptions {
        atomic_t refcnt;
        /* Length of this structure */
        int tot_len;

        /* length of extension headers */
        __u16 opt_flen;         /* after fragment hdr */
        __u16 opt_nflen;        /* before fragment hdr */

        struct ipv6_opt_hdr *hopopt;
        struct ipv6_opt_hdr *dst0opt;
        struct ipv6_rt_hdr *srcrt;      /* Routing Header */
        struct ipv6_opt_hdr *dst1opt;
        struct rcu_head rcu;
        /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

struct ip6_flowlabel {
        struct ip6_flowlabel __rcu *next;
        __be32 label;
        atomic_t users;
        struct in6_addr dst;
        struct ipv6_txoptions *opt;
        unsigned long linger;
        struct rcu_head rcu;
        u8 share;
        union {
                struct pid *pid;
                kuid_t uid;
        } owner;
        unsigned long lastuse;
        unsigned long expires;
        struct net *fl_net;
};

#define IPV6_FLOWINFO_MASK              cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK             cpu_to_be32(0x000FFFFF)
#define IPV6_FLOWLABEL_STATELESS_FLAG   cpu_to_be32(0x00080000)
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT       20

struct ipv6_fl_socklist {
        struct ipv6_fl_socklist __rcu *next;
        struct ip6_flowlabel *fl;
        struct rcu_head rcu;
};

struct ipcm6_cookie {
        __s16 hlimit;
        __s16 tclass;
        __s8 dontfrag;
        struct ipv6_txoptions *opt;
};

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
        struct ipv6_txoptions *opt;

        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        if (opt) {
                if (!atomic_inc_not_zero(&opt->refcnt))
                        opt = NULL;
                else
                        opt = rcu_pointer_handoff(opt);
        }
        rcu_read_unlock();
        return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
        if (opt && atomic_dec_and_test(&opt->refcnt))
                kfree_rcu(opt, rcu);
}
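
/*
 * Usage note (illustrative sketch, not part of the header proper):
 * txopt_get() takes a reference under RCU so the options stay valid
 * after the read-side section ends; callers pair it with txopt_put(),
 * which tolerates a NULL pointer, e.g.:
 *
 *      struct ipv6_txoptions *opt = txopt_get(inet6_sk(sk));
 *
 *      if (opt)
 *              len += opt->opt_flen + opt->opt_nflen;
 *      txopt_put(opt);
 */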

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
        if (fl)
                atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                               struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
                                        struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
                                          struct ipv6_opt_hdr __user *newopt,
                                          int newoptlen);
struct ipv6_txoptions *
ipv6_renew_options_kern(struct sock *sk,
                        struct ipv6_txoptions *opt,
                        int newtype,
                        struct ipv6_opt_hdr *newopt,
                        int newoptlen);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
                       const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
                                           struct ipv6_txoptions *opt);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
        /* If forwarding is enabled, RAs are not accepted unless the special
         * hybrid mode (accept_ra=2) is enabled.
         */
        return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
               idev->cnf.accept_ra;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_frag_mem(struct net *net)
{
        return sum_frag_mem_limit(&net->ipv6.frags);
}
#endif

#define IPV6_FRAG_HIGH_THRESH   (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH    (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT       (60 * HZ)       /* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);

static inline int ipv6_addr_type(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
        return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
        return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
        return type & IPV6_ADDR_LINKLOCAL ||
               (type & IPV6_ADDR_MULTICAST &&
                (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
        return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ulm = (const unsigned long *)m;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
                  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
        return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
                  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
                  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}
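
/*
 * Note (illustrative): ipv6_masked_addr_cmp() follows memcmp()-style
 * semantics - it returns false when the two addresses match under the
 * mask and true when they differ, so a "does addr fall within this
 * masked prefix?" check reads:
 *
 *      if (!ipv6_masked_addr_cmp(&addr, &netmask, &prefix))
 *              matched = true;
 */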

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
                                    const struct in6_addr *addr,
                                    int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
        memcpy(pfx->s6_addr, addr, o);
        if (b != 0)
                pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}
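
/*
 * Worked example (illustrative): for plen = 61, o = 61 >> 3 = 7 whole
 * bytes are copied and b = 61 & 0x7 = 5 bits remain, so byte 7 is masked
 * with (0xff00 >> 5), which truncates to 0xf8 in the byte and keeps only
 * its top five bits:
 *
 *      struct in6_addr pfx;
 *
 *      ipv6_addr_prefix(&pfx, &addr, 61);
 */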

static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
                                         const struct in6_addr *pfx,
                                         int plen)
{
        /* caller must guarantee 0 <= plen <= 128 */
        int o = plen >> 3,
            b = plen & 0x7;

        memcpy(addr->s6_addr, pfx, o);
        if (b != 0) {
                addr->s6_addr[o] &= ~(0xff00 >> b);
                addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
        }
}

static inline void __ipv6_addr_set_half(__be32 *addr,
                                        __be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
        if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
                *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
                return;
        }
#elif defined(__LITTLE_ENDIAN)
        if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
                *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
                return;
        }
#endif
#endif
        addr[0] = wh;
        addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
                                 __be32 w1, __be32 w2,
                                 __be32 w3, __be32 w4)
{
        __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
        __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}
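
/*
 * Example (illustrative): ipv6_addr_set() takes the four 32-bit words in
 * network byte order, so the link-local all-nodes address ff02::1 can be
 * built as:
 *
 *      struct in6_addr all_nodes;
 *
 *      ipv6_addr_set(&all_nodes, htonl(0xff020000), 0, 0, htonl(1));
 */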

static inline bool ipv6_addr_equal(const struct in6_addr *a1,
                                   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
                (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
                (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
                (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
                                              const __be64 *a2,
                                              unsigned int len)
{
        if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
                return false;
        return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be64 *a1 = (const __be64 *)addr1;
        const __be64 *a2 = (const __be64 *)addr2;

        if (prefixlen >= 64) {
                if (a1[0] ^ a2[0])
                        return false;
                return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
        }
        return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
                                     const struct in6_addr *addr2,
                                     unsigned int prefixlen)
{
        const __be32 *a1 = addr1->s6_addr32;
        const __be32 *a2 = addr2->s6_addr32;
        unsigned int pdw, pbi;

        /* check complete u32 in prefix */
        pdw = prefixlen >> 5;
        if (pdw && memcmp(a1, a2, pdw << 2))
                return false;

        /* check incomplete u32 in prefix */
        pbi = prefixlen & 0x1f;
        if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
                return false;

        return true;
}
#endif
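
/*
 * Example (illustrative): both ipv6_prefix_equal() variants compare only
 * the leading prefixlen bits, so addresses that differ past the prefix
 * still match. 2001:db8::1 and 2001:db8:ffff::1 satisfy
 *
 *      ipv6_prefix_equal(&a1, &a2, 32)
 *
 * but not the same call with prefixlen 48, since bits 32..47 differ.
 */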

struct inet_frag_queue;

enum ip6_defrag_users {
        IP6_DEFRAG_LOCAL_DELIVER,
        IP6_DEFRAG_CONNTRACK_IN,
        __IP6_DEFRAG_CONNTRACK_IN       = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_OUT,
        __IP6_DEFRAG_CONNTRACK_OUT      = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

struct ip6_create_arg {
        __be32 id;
        u32 user;
        const struct in6_addr *src;
        const struct in6_addr *dst;
        int iif;
        u8 ecn;
};

void ip6_frag_init(struct inet_frag_queue *q, const void *a);
bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);

/*
 *      Equivalent of ipv4 struct ip
 */
struct frag_queue {
        struct inet_frag_queue q;

        __be32 id;                      /* fragment id */
        u32 user;
        struct in6_addr saddr;
        struct in6_addr daddr;

        int iif;
        unsigned int csum;
        __u16 nhoffset;
        u8 ecn;
};

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
                           struct inet_frags *frags);

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;

        return (ul[0] | ul[1]) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul = (const unsigned long *)a;
        unsigned long x = ul[0] ^ ul[1];

        return (u32)(x ^ (x >> 32));
#else
        return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
                             a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
                            initval);
}

static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const __be64 *be = (const __be64 *)a;

        return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
#else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
#endif
}

/*
 * Note that we must __force cast these to unsigned long to make sparse happy,
 * since all of the endian-annotated types are fixed size regardless of arch.
 */
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
        return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
                *(unsigned long *)a |
#else
                (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
                (__force unsigned long)(a->s6_addr32[2] ^
                                        cpu_to_be32(0x0000ffff))) == 0UL;
}

/*
 * Check for an RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
        return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
{
        ipv6_addr_set(v4mapped,
                      0, 0,
                      htonl(0x0000FFFF),
                      addr);
}
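
/*
 * Example (illustrative): ipv6_addr_set_v4mapped() produces the
 * ::ffff:a.b.c.d form that ipv6_addr_v4mapped() above recognises, e.g.
 * 192.0.2.1 becomes ::ffff:192.0.2.1 (::ffff:c000:201):
 *
 *      struct in6_addr mapped;
 *
 *      ipv6_addr_set_v4mapped(htonl(0xc0000201), &mapped);
 */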

/*
 * find the first different bit between two addresses;
 * the address length must be a multiple of 32 bits
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
        const __be32 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 2;

        for (i = 0; i < addrlen; i++) {
                __be32 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 32 + 31 - __fls(ntohl(xb));
        }

        /*
         *      we should *never* get to this point since that
         *      would mean the addrs are equal
         *
         *      However, we do get to it 8) And exactly, when
         *      addresses are equal 8)
         *
         *      ip route add 1111::/128 via ...
         *      ip route add 1111::/64 via ...
         *      and we are here.
         *
         *      Ideally, this function should stop comparison
         *      at prefix length. It does not, but it is still OK,
         *      if returned value is greater than prefix length.
         *      --ANK (980803)
         */
        return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
        const __be64 *a1 = token1, *a2 = token2;
        int i;

        addrlen >>= 3;

        for (i = 0; i < addrlen; i++) {
                __be64 xb = a1[i] ^ a2[i];
                if (xb)
                        return i * 64 + 63 - __fls(be64_to_cpu(xb));
        }

        return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        if (__builtin_constant_p(addrlen) && !(addrlen & 7))
                return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
        return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
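
/*
 * Worked example (illustrative): ipv6_addr_diff() returns the bit index,
 * counting from the most significant bit, of the first bit that differs,
 * which is what longest-prefix-match logic wants. For 2001:db8::1 versus
 * 2001:db8::2 the addresses first diverge in the last byte
 * (0x01 ^ 0x02 = 0x03), so the result is 126; for two identical
 * addresses the loop falls through and 128 (the address length in bits)
 * is returned.
 */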

__be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr);
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
                                      struct dst_entry *dst)
{
        int hlimit;

        if (ipv6_addr_is_multicast(&fl6->daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
        return hlimit;
}

/* copy IPv6 saddr & daddr to flow_keys, possibly using 64-bit load/store
 * Equivalent to :      flow->v6addrs.src = iph->saddr;
 *                      flow->v6addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
                                            const struct ipv6hdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
                     offsetof(typeof(flow->addrs), v6addrs.src) +
                     sizeof(flow->addrs.v6addrs.src));
        memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}

#if IS_ENABLED(CONFIG_IPV6)

/* Sysctl settings for net.ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF         0
#define IP6_AUTO_FLOW_LABEL_OPTOUT      1
#define IP6_AUTO_FLOW_LABEL_OPTIN       2
#define IP6_AUTO_FLOW_LABEL_FORCED      3

#define IP6_AUTO_FLOW_LABEL_MAX         IP6_AUTO_FLOW_LABEL_FORCED

#define IP6_DEFAULT_AUTO_FLOW_LABELS    IP6_AUTO_FLOW_LABEL_OPTOUT

static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        u32 hash;

        /* @flowlabel may include more than a flow label, e.g. the traffic
         * class. Here we want only the flow label value.
         */
        flowlabel &= IPV6_FLOWLABEL_MASK;

        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
             net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
                return flowlabel;

        hash = skb_get_hash_flowi6(skb, fl6);

        /* Since this is being sent on the wire, obfuscate the hash a bit to
         * minimize the possibility of leaking any useful information to an
         * attacker. Only the lower 20 bits are relevant.
         */
        hash = rol32(hash, 16);

        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;

        if (net->ipv6.sysctl.flowlabel_state_ranges)
                flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;

        return flowlabel;
}

static inline int ip6_default_np_autolabel(struct net *net)
{
        switch (net->ipv6.sysctl.auto_flowlabels) {
        case IP6_AUTO_FLOW_LABEL_OFF:
        case IP6_AUTO_FLOW_LABEL_OPTIN:
        default:
                return 0;
        case IP6_AUTO_FLOW_LABEL_OPTOUT:
        case IP6_AUTO_FLOW_LABEL_FORCED:
                return 1;
        }
}
#else
static inline void ip6_set_txhash(struct sock *sk) { }
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel,
                                        struct flowi6 *fl6)
{
        return flowlabel;
}
static inline int ip6_default_np_autolabel(struct net *net)
{
        return 0;
}
#endif

/*
 *      Header manipulation
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
        return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}

static inline u8 ip6_tclass(__be32 flowinfo)
{
        return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}

static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
        return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
}
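
/*
 * Worked layout note (illustrative): the first 32 bits of the IPv6
 * header are version(4) | traffic class(8) | flow label(20), which is
 * exactly what the helpers above pack and unpack. For instance,
 *
 *      __be32 fi = ip6_make_flowinfo(0x28, htonl(0x12345));
 *
 * yields a flowinfo word for which ip6_tclass(fi) is 0x28 and whose low
 * 20 bits, in host order, carry the 0x12345 flow label.
 */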

/*
 *      Prototypes exported by ipv6
 */

/*
 *      rcv function (called from netdevice level)
 */

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 *      upper-layer output functions
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             __u32 mark, struct ipv6_txoptions *opt, int tclass);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags,
                    const struct sockcm_cookie *sockc);

int ip6_push_pending_frames(struct sock *sk);

void ip6_flush_pending_frames(struct sock *sk);

int ip6_send_skb(struct sk_buff *skb);

struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
                               struct inet_cork_full *cork,
                               struct inet6_cork *v6_cork);
struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
                             const struct sockcm_cookie *sockc);

static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
        return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
                              &inet6_sk(sk)->cork);
}

int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
                   struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst);
struct dst_entry *ip6_blackhole_route(struct net *net,
                                      struct dst_entry *orig_dst);

/*
 *      skb processing functions
 */

int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);

int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

/*
 *      Extension header (options) processing
 */

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto, struct in6_addr **daddr_p);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                         u8 *proto);

int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
                     __be16 *frag_offp);

bool ipv6_ext_hdr(u8 nexthdr);

enum {
        IP6_FH_F_FRAG           = (1 << 0),
        IP6_FH_F_AUTH           = (1 << 1),
        IP6_FH_F_SKIP_RH        = (1 << 2),
};

/* find specified header and get offset to it */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
                  unsigned short *fragoff, int *fragflg);

int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);

struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig);

/*
 *      socket options (ipv6_sockglue.c)
 */

int ipv6_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen);
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen);

int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
                           int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);

int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
                     int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                     u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);

int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
                  int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk);

/*
 *      reassembly.c
 */

extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
extern const struct proto_ops inet6_sockraw_ops;

struct group_source_req;
struct group_filter;

int ip6_mc_source(int add, int omode, struct sock *sk,
                  struct group_source_req *pgsr);
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
                  struct group_filter __user *optval, int __user *optlen);

#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
void ac6_proc_exit(struct net *net);
int raw6_proc_init(void);
void raw6_proc_exit(void);
int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
int udplite6_proc_init(void);
void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);
#else
static inline int ac6_proc_init(struct net *net) { return 0; }
static inline void ac6_proc_exit(struct net *net) { }
static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; }
static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif

#ifdef CONFIG_SYSCTL
extern struct ctl_table ipv6_route_table_template[];

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif

int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
#endif /* _NET_IPV6_H */