/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

bool tcf_queue_work(struct work_struct *work);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
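
/*
 * Illustrative sketch, not part of the original header: classifiers are
 * expected to pair tcf_exts_init() with tcf_exts_destroy() and to free
 * the action array on their error paths. The "foo" names below are
 * hypothetical.
 *
 *	struct foo_filter *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	int err;
 *
 *	if (!f)
 *		return -ENOBUFS;
 *	err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0) {
 *		kfree(f);
 *		return err;
 *	}
 *	...
 *	tcf_exts_destroy(&f->exts);	// on teardown
 */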

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case; otherwise they may race
 * with tc_action_net_exit(). Returns true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on normal execution,
 * a negative number if the filter must be considered unmatched, or a
 * positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
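
/*
 * Illustrative sketch, not part of the original header: a classifier's
 * ->classify() callback typically funnels a successful match through
 * tcf_exts_exec() and propagates its verdict to the qdisc layer. The
 * "foo" names below are hypothetical.
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_filter *f = foo_lookup(tp, skb);
 *
 *		if (!f)
 *			return -1;	// unmatched
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */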

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
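
/*
 * Illustrative sketch, not part of the original header: for three
 * matches chained as "A AND B OR C", tcf_em_early_end() lets the
 * evaluation loop stop at A when A fails (TCF_EM_REL_AND) and at B
 * when B succeeds (TCF_EM_REL_OR). __tcf_em_tree_match() uses it
 * roughly like this (simplified; containers, jumps and inversion
 * are omitted):
 *
 *	for (i = 0; i < tree->hdr.nmatches; i++) {
 *		struct tcf_ematch *em = &tree->matches[i];
 *
 *		res = em->ops->match(skb, em, info);	// simplified
 *		if (tcf_em_early_end(em, res))
 *			break;
 *	}
 */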

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch is not enabled in the kernel; otherwise 0
 * is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
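
/*
 * Illustrative sketch, not part of the original header: a classifier
 * embedding an ematch tree validates it at configuration time and
 * consults it on the fast path. TCA_FOO_EMATCHES and the "foo" filter
 * are hypothetical; passing a NULL info pointer is permitted.
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *	...
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;	// this filter does not match, try the next one
 */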

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb->data;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
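
/*
 * Illustrative sketch, not part of the original header: an ematch's
 * ->match() implementation can combine the two helpers above to access
 * packet data safely. "off", "len" and "pattern" stand for hypothetical
 * match parameters.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;	// out of bounds, treat as no match
 *	return !memcmp(ptr, pattern, len);
 */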

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	u32 classid;
};

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	cls_common->classid = tp->classid;
}

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
{
	if (tc_skip_hw(flags))
		return false;
	return tc_can_offload(dev);
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;
	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
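
/*
 * Illustrative sketch, not part of the original header: how a classifier
 * might combine the flag helpers above when programming hardware. The
 * foo_replace_hw_filter() call is hypothetical and error handling is
 * condensed.
 *
 *	if (!tc_flags_valid(flags))
 *		return -EINVAL;	// unknown flags, or both SKIP_HW and SKIP_SW
 *	if (tc_should_offload(dev, flags)) {
 *		err = foo_replace_hw_filter(dev, f);
 *		if (!err)
 *			f->flags |= TCA_CLS_FLAGS_IN_HW;
 *		else if (tc_skip_sw(flags))
 *			return err;	// hw-only filter failed to install
 *	}
 *	if (tc_skip_sw(flags) && !tc_in_hw(f->flags))
 *		return -EINVAL;	// filter has nowhere to run
 */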

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	bool egress_dev;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* This structure holds the cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
#endif /* __NET_PKT_CLS_H */