  1. /*
  2. * VLAN An implementation of 802.1Q VLAN tagging.
  3. *
  4. * Authors: Ben Greear <greearb@candelatech.com>
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. */
  12. #ifndef _LINUX_IF_VLAN_H_
  13. #define _LINUX_IF_VLAN_H_
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/rtnetlink.h>
  17. #include <linux/bug.h>
  18. #include <uapi/linux/if_vlan.h>
#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */
/*
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};
/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
  53. #include <linux/skbuff.h>
/* Return the 802.1Q ethernet header located at the skb's MAC header. */
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}
/* TCI field layout: 3-bit PCP | 1-bit CFI | 12-bit VID */
#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
  66. static inline bool is_vlan_dev(const struct net_device *dev)
  67. {
  68. return dev->priv_flags & IFF_802_1Q_VLAN;
  69. }
/* Accessors for the out-of-band VLAN tag kept in skb->vlan_tci. */
#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_prio(__skb)	((__skb)->vlan_tci & VLAN_PRIO_MASK)
/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Real implementations live in net/8021q; stubs follow in the #else arm. */
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};
struct proc_dir_entry;
struct netpoll;

/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx stats
 * @netpoll: netpoll state (only with CONFIG_NET_POLL_CONTROLLER)
 * @nest_level: depth of VLAN-on-VLAN stacking for lockdep nesting
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
	unsigned int				nest_level;
};
/* Cast @dev's private area to the VLAN bookkeeping structure. */
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}
  149. static inline u16
  150. vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
  151. {
  152. struct vlan_priority_tci_mapping *mp;
  153. smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
  154. mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
  155. while (mp) {
  156. if (mp->priority == skprio) {
  157. return mp->vlan_qos; /* This should already be shifted
  158. * to mask correctly with the
  159. * VLAN's TCI */
  160. }
  161. mp = mp->next;
  162. }
  163. return 0;
  164. }
/* VID filter management on the underlying device (net/8021q/vlan_core.c). */
extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);
/* VLAN nesting depth of @dev; caller must pass a VLAN device. */
static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG_ON(!is_vlan_dev(dev));
	return vlan_dev_priv(dev)->nest_level;
}
  178. #else
/* Stubs for !CONFIG_VLAN_8021Q builds: lookups fail benignly, while the
 * accessors that are only meaningful on a VLAN device BUG() because no
 * VLAN device can exist in this configuration.
 */
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}
/* Stubs for !CONFIG_VLAN_8021Q builds: VID filter maintenance becomes a
 * harmless no-op so callers need no conditional compilation.
 */
static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
/* No VLAN devices exist without CONFIG_VLAN_8021Q, so this is a bug. */
static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG();
	return 0;
}
  234. #endif
  235. /**
  236. * eth_type_vlan - check for valid vlan ether type.
  237. * @ethertype: ether type to check
  238. *
  239. * Returns true if the ether type is a vlan ether type.
  240. */
  241. static inline bool eth_type_vlan(__be16 ethertype)
  242. {
  243. switch (ethertype) {
  244. case htons(ETH_P_8021Q):
  245. case htons(ETH_P_8021AD):
  246. return true;
  247. default:
  248. return false;
  249. }
  250. }
  251. static inline bool vlan_hw_offload_capable(netdev_features_t features,
  252. __be16 proto)
  253. {
  254. if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
  255. return true;
  256. if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
  257. return true;
  258. return false;
  259. }
/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	struct vlan_ethhdr *veth;

	/* Ensure 4 bytes of headroom and a private, writable header. */
	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the beginning of the new header. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
	skb->mac_header -= VLAN_HLEN;

	/* first, the ethernet type */
	veth->h_vlan_proto = vlan_proto;

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}
  287. /**
  288. * vlan_insert_tag - regular VLAN tag inserting
  289. * @skb: skbuff to tag
  290. * @vlan_proto: VLAN encapsulation protocol
  291. * @vlan_tci: VLAN TCI to insert
  292. *
  293. * Inserts the VLAN tag into @skb as part of the payload
  294. * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  295. *
  296. * Following the skb_unshare() example, in case of error, the calling function
  297. * doesn't have to worry about freeing the original skb.
  298. *
  299. * Does not change skb->protocol so this function can be used during receive.
  300. */
  301. static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
  302. __be16 vlan_proto, u16 vlan_tci)
  303. {
  304. int err;
  305. err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
  306. if (err) {
  307. dev_kfree_skb_any(skb);
  308. return NULL;
  309. }
  310. return skb;
  311. }
  312. /**
  313. * vlan_insert_tag_set_proto - regular VLAN tag inserting
  314. * @skb: skbuff to tag
  315. * @vlan_proto: VLAN encapsulation protocol
  316. * @vlan_tci: VLAN TCI to insert
  317. *
  318. * Inserts the VLAN tag into @skb as part of the payload
  319. * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  320. *
  321. * Following the skb_unshare() example, in case of error, the calling function
  322. * doesn't have to worry about freeing the original skb.
  323. */
  324. static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
  325. __be16 vlan_proto,
  326. u16 vlan_tci)
  327. {
  328. skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
  329. if (skb)
  330. skb->protocol = vlan_proto;
  331. return skb;
  332. }
/*
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	/* vlan_proto and the TCI must be read before the insert, which may
	 * reallocate or free the skb.
	 */
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	/* Tag now lives in the payload; clear the out-of-band copy. */
	if (likely(skb))
		skb->vlan_tci = 0;
	return skb;
}
  350. /*
  351. * vlan_hwaccel_push_inside - pushes vlan tag to the payload
  352. * @skb: skbuff to tag
  353. *
  354. * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the
  355. * VLAN tag from @skb->vlan_tci inside to the payload.
  356. *
  357. * Following the skb_unshare() example, in case of error, the calling function
  358. * doesn't have to worry about freeing the original skb.
  359. */
  360. static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
  361. {
  362. if (skb_vlan_tag_present(skb))
  363. skb = __vlan_hwaccel_push_inside(skb);
  364. return skb;
  365. }
/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	/* VLAN_TAG_PRESENT marks vlan_tci as valid for skb_vlan_tag_*(). */
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}
/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	/* NOTE(review): reads the header straight from skb->data; assumes
	 * at least VLAN_ETH_HLEN linear bytes — callers must guarantee it.
	 */
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}
  395. /**
  396. * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  397. * @skb: skbuff to query
  398. * @vlan_tci: buffer to store value
  399. *
  400. * Returns error if @skb->vlan_tci is not set correctly
  401. */
  402. static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
  403. u16 *vlan_tci)
  404. {
  405. if (skb_vlan_tag_present(skb)) {
  406. *vlan_tci = skb_vlan_tag_get(skb);
  407. return 0;
  408. } else {
  409. *vlan_tci = 0;
  410. return -EINVAL;
  411. }
  412. }
  413. #define HAVE_VLAN_GET_TAG
  414. /**
  415. * vlan_get_tag - get the VLAN ID from the skb
  416. * @skb: skbuff to query
  417. * @vlan_tci: buffer to store value
  418. *
  419. * Returns error if the skb is not VLAN tagged
  420. */
  421. static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  422. {
  423. if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
  424. return __vlan_hwaccel_get_tag(skb, vlan_tci);
  425. } else {
  426. return __vlan_get_tag(skb, vlan_tci);
  427. }
  428. }
/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		/* Walk past every stacked VLAN header until a non-VLAN
		 * ethertype appears.  pskb_may_pull() makes sure each
		 * header is in the linear area (it may reallocate the
		 * head, so skb->data is re-read each iteration).
		 */
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}
/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}
/* Set skb->protocol from the encapsulated ethertype of a VLAN header,
 * handling the legacy 802.3/802.2 length-field encodings.
 */
static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
  509. /**
  510. * skb_vlan_tagged - check if skb is vlan tagged.
  511. * @skb: skbuff to query
  512. *
  513. * Returns true if the skb is tagged, regardless of whether it is hardware
  514. * accelerated or not.
  515. */
  516. static inline bool skb_vlan_tagged(const struct sk_buff *skb)
  517. {
  518. if (!skb_vlan_tag_present(skb) &&
  519. likely(!eth_type_vlan(skb->protocol)))
  520. return false;
  521. return true;
  522. }
/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		/* pskb_may_pull() may reallocate the head, so the header
		 * pointer must be taken from skb->data afterwards.
		 */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	/* One tag accounted for above; a VLAN ethertype here means two. */
	if (!eth_type_vlan(protocol))
		return false;

	return true;
}
/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* One 32-bit load covers both 16-bit fields; the arch tolerates
	 * the potentially unaligned access.
	 */
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	/* Field-by-field XOR avoids unaligned 32-bit loads. */
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
  588. #endif /* !(_LINUX_IF_VLAN_H_) */