/* net/sched/sch_fifo.c */
  1. /*
  2. * net/sched/sch_fifo.c The simplest FIFO queue.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10. */
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/errno.h>
  16. #include <linux/skbuff.h>
  17. #include <net/pkt_sched.h>
  18. /* 1 band FIFO pseudo-"scheduler" */
  19. static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  20. struct sk_buff **to_free)
  21. {
  22. if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
  23. return qdisc_enqueue_tail(skb, sch);
  24. return qdisc_drop(skb, sch, to_free);
  25. }
  26. static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  27. struct sk_buff **to_free)
  28. {
  29. if (likely(sch->q.qlen < sch->limit))
  30. return qdisc_enqueue_tail(skb, sch);
  31. return qdisc_drop(skb, sch, to_free);
  32. }
  33. static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  34. struct sk_buff **to_free)
  35. {
  36. unsigned int prev_backlog;
  37. if (likely(sch->q.qlen < sch->limit))
  38. return qdisc_enqueue_tail(skb, sch);
  39. prev_backlog = sch->qstats.backlog;
  40. /* queue full, remove one skb to fulfill the limit */
  41. __qdisc_queue_drop_head(sch, &sch->q, to_free);
  42. qdisc_qstats_drop(sch);
  43. qdisc_enqueue_tail(skb, sch);
  44. qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
  45. return NET_XMIT_CN;
  46. }
  47. static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
  48. {
  49. bool bypass;
  50. bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
  51. if (opt == NULL) {
  52. u32 limit = qdisc_dev(sch)->tx_queue_len;
  53. if (is_bfifo)
  54. limit *= psched_mtu(qdisc_dev(sch));
  55. sch->limit = limit;
  56. } else {
  57. struct tc_fifo_qopt *ctl = nla_data(opt);
  58. if (nla_len(opt) < sizeof(*ctl))
  59. return -EINVAL;
  60. sch->limit = ctl->limit;
  61. }
  62. if (is_bfifo)
  63. bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
  64. else
  65. bypass = sch->limit >= 1;
  66. if (bypass)
  67. sch->flags |= TCQ_F_CAN_BYPASS;
  68. else
  69. sch->flags &= ~TCQ_F_CAN_BYPASS;
  70. return 0;
  71. }
  72. static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
  73. {
  74. struct tc_fifo_qopt opt = { .limit = sch->limit };
  75. if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
  76. goto nla_put_failure;
  77. return skb->len;
  78. nla_put_failure:
  79. return -1;
  80. }
  81. struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
  82. .id = "pfifo",
  83. .priv_size = 0,
  84. .enqueue = pfifo_enqueue,
  85. .dequeue = qdisc_dequeue_head,
  86. .peek = qdisc_peek_head,
  87. .init = fifo_init,
  88. .reset = qdisc_reset_queue,
  89. .change = fifo_init,
  90. .dump = fifo_dump,
  91. .owner = THIS_MODULE,
  92. };
  93. EXPORT_SYMBOL(pfifo_qdisc_ops);
  94. struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
  95. .id = "bfifo",
  96. .priv_size = 0,
  97. .enqueue = bfifo_enqueue,
  98. .dequeue = qdisc_dequeue_head,
  99. .peek = qdisc_peek_head,
  100. .init = fifo_init,
  101. .reset = qdisc_reset_queue,
  102. .change = fifo_init,
  103. .dump = fifo_dump,
  104. .owner = THIS_MODULE,
  105. };
  106. EXPORT_SYMBOL(bfifo_qdisc_ops);
  107. struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
  108. .id = "pfifo_head_drop",
  109. .priv_size = 0,
  110. .enqueue = pfifo_tail_enqueue,
  111. .dequeue = qdisc_dequeue_head,
  112. .peek = qdisc_peek_head,
  113. .init = fifo_init,
  114. .reset = qdisc_reset_queue,
  115. .change = fifo_init,
  116. .dump = fifo_dump,
  117. .owner = THIS_MODULE,
  118. };
  119. /* Pass size change message down to embedded FIFO */
  120. int fifo_set_limit(struct Qdisc *q, unsigned int limit)
  121. {
  122. struct nlattr *nla;
  123. int ret = -ENOMEM;
  124. /* Hack to avoid sending change message to non-FIFO */
  125. if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
  126. return 0;
  127. nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
  128. if (nla) {
  129. nla->nla_type = RTM_NEWQDISC;
  130. nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
  131. ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
  132. ret = q->ops->change(q, nla);
  133. kfree(nla);
  134. }
  135. return ret;
  136. }
  137. EXPORT_SYMBOL(fifo_set_limit);
  138. struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
  139. unsigned int limit)
  140. {
  141. struct Qdisc *q;
  142. int err = -ENOMEM;
  143. q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
  144. if (q) {
  145. err = fifo_set_limit(q, limit);
  146. if (err < 0) {
  147. qdisc_destroy(q);
  148. q = NULL;
  149. }
  150. }
  151. return q ? : ERR_PTR(err);
  152. }
  153. EXPORT_SYMBOL(fifo_create_dflt);