sch_fifo.c
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/errno.h>
  16. #include <linux/skbuff.h>
  17. #include <net/pkt_sched.h>
/* 1 band FIFO pseudo-"scheduler" */

/* Per-qdisc private state: the single knob is the queue limit,
 * counted in packets for the pfifo variants and in bytes for bfifo
 * (see fifo_init, which scales by the MTU in the bfifo case).
 */
struct fifo_sched_data {
	u32 limit;
};
  22. static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  23. {
  24. struct fifo_sched_data *q = qdisc_priv(sch);
  25. if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
  26. return qdisc_enqueue_tail(skb, sch);
  27. return qdisc_reshape_fail(skb, sch);
  28. }
  29. static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  30. {
  31. struct fifo_sched_data *q = qdisc_priv(sch);
  32. if (likely(skb_queue_len(&sch->q) < q->limit))
  33. return qdisc_enqueue_tail(skb, sch);
  34. return qdisc_reshape_fail(skb, sch);
  35. }
  36. static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  37. {
  38. struct sk_buff *skb_head;
  39. struct fifo_sched_data *q = qdisc_priv(sch);
  40. if (likely(skb_queue_len(&sch->q) < q->limit))
  41. return qdisc_enqueue_tail(skb, sch);
  42. /* queue full, remove one skb to fulfill the limit */
  43. skb_head = qdisc_dequeue_head(sch);
  44. sch->qstats.drops++;
  45. kfree_skb(skb_head);
  46. qdisc_enqueue_tail(skb, sch);
  47. return NET_XMIT_CN;
  48. }
  49. static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
  50. {
  51. struct fifo_sched_data *q = qdisc_priv(sch);
  52. bool bypass;
  53. bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
  54. if (opt == NULL) {
  55. u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
  56. if (is_bfifo)
  57. limit *= psched_mtu(qdisc_dev(sch));
  58. q->limit = limit;
  59. } else {
  60. struct tc_fifo_qopt *ctl = nla_data(opt);
  61. if (nla_len(opt) < sizeof(*ctl))
  62. return -EINVAL;
  63. q->limit = ctl->limit;
  64. }
  65. if (is_bfifo)
  66. bypass = q->limit >= psched_mtu(qdisc_dev(sch));
  67. else
  68. bypass = q->limit >= 1;
  69. if (bypass)
  70. sch->flags |= TCQ_F_CAN_BYPASS;
  71. else
  72. sch->flags &= ~TCQ_F_CAN_BYPASS;
  73. return 0;
  74. }
/*
 * Dump the configured limit as a TCA_OPTIONS tc_fifo_qopt attribute.
 * Returns the updated skb length on success; -1 if the attribute did
 * not fit (NLA_PUT jumps to nla_put_failure on overflow).
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}
/* Packet-count-limited FIFO: tail enqueue, head dequeue/drop. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
/* Byte-limited FIFO: identical to pfifo except enqueue checks the
 * backlog in bytes (see bfifo_enqueue) rather than the packet count.
 */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
/* Packet-limited FIFO that, when full, drops the OLDEST packet to
 * admit the new one; .drop accordingly removes from the head too.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
  125. /* Pass size change message down to embedded FIFO */
  126. int fifo_set_limit(struct Qdisc *q, unsigned int limit)
  127. {
  128. struct nlattr *nla;
  129. int ret = -ENOMEM;
  130. /* Hack to avoid sending change message to non-FIFO */
  131. if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
  132. return 0;
  133. nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
  134. if (nla) {
  135. nla->nla_type = RTM_NEWQDISC;
  136. nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
  137. ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
  138. ret = q->ops->change(q, nla);
  139. kfree(nla);
  140. }
  141. return ret;
  142. }
  143. EXPORT_SYMBOL(fifo_set_limit);
  144. struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
  145. unsigned int limit)
  146. {
  147. struct Qdisc *q;
  148. int err = -ENOMEM;
  149. q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
  150. if (q) {
  151. err = fifo_set_limit(q, limit);
  152. if (err < 0) {
  153. qdisc_destroy(q);
  154. q = NULL;
  155. }
  156. }
  157. return q ? : ERR_PTR(err);
  158. }
  159. EXPORT_SYMBOL(fifo_create_dflt);