/* net/sched/sch_multiq.c - hardware multiqueue-aware packet scheduler */
  1. /*
  2. * Copyright (c) 2008, Intel Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, see <http://www.gnu.org/licenses/>.
  15. *
  16. * Author: Alexander Duyck <alexander.h.duyck@intel.com>
  17. */
  18. #include <linux/module.h>
  19. #include <linux/slab.h>
  20. #include <linux/types.h>
  21. #include <linux/kernel.h>
  22. #include <linux/string.h>
  23. #include <linux/errno.h>
  24. #include <linux/skbuff.h>
  25. #include <net/netlink.h>
  26. #include <net/pkt_sched.h>
  27. #include <net/pkt_cls.h>
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;		/* number of active bands (tracks real_num_tx_queues) */
	u16 max_bands;		/* allocated length of queues[] (num_tx_queues) */
	u16 curband;		/* round-robin cursor advanced on each dequeue */
	struct tcf_proto __rcu *filter_list;	/* classifier chain (RCU-protected) */
	struct tcf_block *block;		/* tcf block that owns filter_list */
	struct Qdisc **queues;	/* one child qdisc per band; &noop_qdisc if unused */
};
/*
 * multiq_classify - pick the child qdisc for an skb.
 *
 * Runs the attached classifier chain (if any) for its actions, then routes
 * the packet by its queue mapping rather than the classifier result.
 * Returns the child qdisc, or NULL when an action consumed/dropped the skb;
 * in that case *qerr carries the enqueue return code for the caller.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through - stolen/queued/trap also return no child */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	/* band selection follows the skb's TX queue mapping */
	band = skb_get_queue_mapping(skb);

	/* clamp out-of-range mappings to band 0 */
	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
  61. static int
  62. multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  63. struct sk_buff **to_free)
  64. {
  65. struct Qdisc *qdisc;
  66. int ret;
  67. qdisc = multiq_classify(skb, sch, &ret);
  68. #ifdef CONFIG_NET_CLS_ACT
  69. if (qdisc == NULL) {
  70. if (ret & __NET_XMIT_BYPASS)
  71. qdisc_qstats_drop(sch);
  72. __qdisc_drop(skb, to_free);
  73. return ret;
  74. }
  75. #endif
  76. ret = qdisc_enqueue(skb, qdisc, to_free);
  77. if (ret == NET_XMIT_SUCCESS) {
  78. sch->q.qlen++;
  79. return NET_XMIT_SUCCESS;
  80. }
  81. if (net_xmit_drop_count(ret))
  82. qdisc_qstats_drop(sch);
  83. return ret;
  84. }
  85. static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
  86. {
  87. struct multiq_sched_data *q = qdisc_priv(sch);
  88. struct Qdisc *qdisc;
  89. struct sk_buff *skb;
  90. int band;
  91. for (band = 0; band < q->bands; band++) {
  92. /* cycle through bands to ensure fairness */
  93. q->curband++;
  94. if (q->curband >= q->bands)
  95. q->curband = 0;
  96. /* Check that target subqueue is available before
  97. * pulling an skb to avoid head-of-line blocking.
  98. */
  99. if (!netif_xmit_stopped(
  100. netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
  101. qdisc = q->queues[q->curband];
  102. skb = qdisc->dequeue(qdisc);
  103. if (skb) {
  104. qdisc_bstats_update(sch, skb);
  105. sch->q.qlen--;
  106. return skb;
  107. }
  108. }
  109. }
  110. return NULL;
  111. }
  112. static struct sk_buff *multiq_peek(struct Qdisc *sch)
  113. {
  114. struct multiq_sched_data *q = qdisc_priv(sch);
  115. unsigned int curband = q->curband;
  116. struct Qdisc *qdisc;
  117. struct sk_buff *skb;
  118. int band;
  119. for (band = 0; band < q->bands; band++) {
  120. /* cycle through bands to ensure fairness */
  121. curband++;
  122. if (curband >= q->bands)
  123. curband = 0;
  124. /* Check that target subqueue is available before
  125. * pulling an skb to avoid head-of-line blocking.
  126. */
  127. if (!netif_xmit_stopped(
  128. netdev_get_tx_queue(qdisc_dev(sch), curband))) {
  129. qdisc = q->queues[curband];
  130. skb = qdisc->ops->peek(qdisc);
  131. if (skb)
  132. return skb;
  133. }
  134. }
  135. return NULL;
  136. }
  137. static void
  138. multiq_reset(struct Qdisc *sch)
  139. {
  140. u16 band;
  141. struct multiq_sched_data *q = qdisc_priv(sch);
  142. for (band = 0; band < q->bands; band++)
  143. qdisc_reset(q->queues[band]);
  144. sch->q.qlen = 0;
  145. q->curband = 0;
  146. }
  147. static void
  148. multiq_destroy(struct Qdisc *sch)
  149. {
  150. int band;
  151. struct multiq_sched_data *q = qdisc_priv(sch);
  152. tcf_block_put(q->block);
  153. for (band = 0; band < q->bands; band++)
  154. qdisc_destroy(q->queues[band]);
  155. kfree(q->queues);
  156. }
/*
 * multiq_tune - (re)configure the band count to match the device.
 *
 * The requested band count in the netlink option is overridden with the
 * device's real_num_tx_queues, so the qdisc always shadows the hardware.
 * Bands that fall out of range are swapped to &noop_qdisc and destroyed
 * under the tree lock; new bands get a default pfifo child.  Returns 0,
 * -EOPNOTSUPP for non-multiqueue devices, or -EINVAL for a short option.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* ignore the user-supplied value; bands always track the device */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	/* retire children of bands that are now out of range */
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			/* unlink before destroy so concurrent readers see noop */
			q->queues[i] = &noop_qdisc;
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	/* create default pfifo children for newly active empty bands;
	 * allocation happens outside the tree lock, the swap inside it
	 */
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
/*
 * multiq_init - allocate per-band state and apply the initial config.
 *
 * On any failure the core calls multiq_destroy(), which tolerates the
 * partially built state left here (NULL queues, zero bands).
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	/* make destroy() on early failure see a defined pointer */
	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	/* size queues[] for every TX queue the device could expose */
	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	/* mark every slot unused until multiq_tune() populates it */
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt);
}
  223. static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
  224. {
  225. struct multiq_sched_data *q = qdisc_priv(sch);
  226. unsigned char *b = skb_tail_pointer(skb);
  227. struct tc_multiq_qopt opt;
  228. opt.bands = q->bands;
  229. opt.max_bands = q->max_bands;
  230. if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
  231. goto nla_put_failure;
  232. return skb->len;
  233. nla_put_failure:
  234. nlmsg_trim(skb, b);
  235. return -1;
  236. }
  237. static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
  238. struct Qdisc **old)
  239. {
  240. struct multiq_sched_data *q = qdisc_priv(sch);
  241. unsigned long band = arg - 1;
  242. if (new == NULL)
  243. new = &noop_qdisc;
  244. *old = qdisc_replace(sch, new, &q->queues[band]);
  245. return 0;
  246. }
  247. static struct Qdisc *
  248. multiq_leaf(struct Qdisc *sch, unsigned long arg)
  249. {
  250. struct multiq_sched_data *q = qdisc_priv(sch);
  251. unsigned long band = arg - 1;
  252. return q->queues[band];
  253. }
  254. static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
  255. {
  256. struct multiq_sched_data *q = qdisc_priv(sch);
  257. unsigned long band = TC_H_MIN(classid);
  258. if (band - 1 >= q->bands)
  259. return 0;
  260. return band;
  261. }
  262. static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
  263. u32 classid)
  264. {
  265. return multiq_find(sch, classid);
  266. }
  267. static void multiq_unbind(struct Qdisc *q, unsigned long cl)
  268. {
  269. }
  270. static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
  271. struct sk_buff *skb, struct tcmsg *tcm)
  272. {
  273. struct multiq_sched_data *q = qdisc_priv(sch);
  274. tcm->tcm_handle |= TC_H_MIN(cl);
  275. tcm->tcm_info = q->queues[cl - 1]->handle;
  276. return 0;
  277. }
  278. static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
  279. struct gnet_dump *d)
  280. {
  281. struct multiq_sched_data *q = qdisc_priv(sch);
  282. struct Qdisc *cl_q;
  283. cl_q = q->queues[cl - 1];
  284. if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
  285. d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
  286. gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
  287. return -1;
  288. return 0;
  289. }
  290. static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
  291. {
  292. struct multiq_sched_data *q = qdisc_priv(sch);
  293. int band;
  294. if (arg->stop)
  295. return;
  296. for (band = 0; band < q->bands; band++) {
  297. if (arg->count < arg->skip) {
  298. arg->count++;
  299. continue;
  300. }
  301. if (arg->fn(sch, band + 1, arg) < 0) {
  302. arg->stop = 1;
  303. break;
  304. }
  305. arg->count++;
  306. }
  307. }
  308. static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
  309. {
  310. struct multiq_sched_data *q = qdisc_priv(sch);
  311. if (cl)
  312. return NULL;
  313. return q->block;
  314. }
/* Class-level operations: one class per band, filters at the qdisc level. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.find		= multiq_find,
	.walk		= multiq_walk,
	.tcf_block	= multiq_tcf_block,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_unbind,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};
/* Qdisc registration descriptor for "multiq". */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,	/* runtime reconfiguration */
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};
/* Register the multiq qdisc with the traffic-control core on module load. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
/* Unregister the multiq qdisc on module unload. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
/* Module entry/exit hookup and license declaration. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)
MODULE_LICENSE("GPL");