* [Codel] [PATCH v9] codel: Controlled Delay AQM
@ 2012-05-07 5:35 Dave Täht
2012-05-07 5:50 ` Eric Dumazet
` (2 more replies)
0 siblings, 3 replies; 19+ messages in thread
From: Dave Täht @ 2012-05-07 5:35 UTC (permalink / raw)
To: codel; +Cc: Dave Täht
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain, Size: 14822 bytes --]
This version (9) adds support for various forms of decrease in s3,
in the form of the module parameters gentle and decrease_method.
It defaults to the algorithm as described in the original presentation.
v1: Original implementation - Dave Taht
v2: Working code - Corrections for ktime - Dave Taht
v3: 32 bit support and port to net-next - Eric Dumazet
v4: 16 bit precision for inv sqrt and cache - Dave Taht
v5: Kernel cleanup and full precision - Eric Dumazet
v6: Dump Stats support added - Eric Dumazet
v7: Complete rewrite for u32 values - Eric Dumazet
v8: Stats and timing added, 64 bit prescale improved - Eric Dumazet
v9: debated functionality moved to isolated routine - Dave Taht
---
include/linux/pkt_sched.h | 25 +++
net/sched/Kconfig | 11 ++
net/sched/Makefile | 1 +
net/sched/sch_codel.c | 463 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 500 insertions(+)
create mode 100644 net/sched/sch_codel.c
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 0d5b793..f62141e 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -633,4 +633,29 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_MINBYTES,
+ TCA_CODEL_INTERVAL,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 count;
+ __u32 delay; /* time elapsed since next packet was queued (in us) */
+ __u32 drop_next;
+ __u32 drop_overlimit;
+ __u32 dropping;
+ __u32 state1;
+ __u32 state2;
+ __u32 state3;
+ __u32 states;
+};
+
#endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2590e91..8106c42 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,17 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index dc5889c..41130b5 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 0000000..a9e6383
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,463 @@
+/*
+ * net/sched/sch_codel.c A Codel implementation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Codel, the COntrolled DELay Queueing discipline
+ * Based on ns2 simulation code presented by Kathie Nichols
+ *
+ * Authors: Dave Täht <d@taht.net>
+ * Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+
+/*
+ * codel uses a 1024 nsec clock, encoded in u32
+ */
+typedef u32 codel_time_t;
+#define CODEL_SHIFT 10
+
+static u32 gentle = 0;
+static u32 decrease_method = 0;
+module_param(gentle, uint, 0644);
+module_param(decrease_method, uint, 0644);
+MODULE_PARM_DESC(gentle,"Gently increment count in massive drop state");
+MODULE_PARM_DESC(decrease_method,"Various means of decreasing count");
+
+
+static codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b) ((int)(a) - (int)(b) > 0)
+#define codel_time_after_eq(a, b) ((int)(a) - (int)(b) >= 0)
+#define codel_time_before(a, b) ((int)(a) - (int)(b) < 0)
+#define codel_time_before_eq(a, b) ((int)(a) - (int)(b) <= 0)
+
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+/* Per-queue state (codel_queue_t instance variables) */
+
+struct codel_sched_data {
+ u32 minbytes;
+ u32 interval;
+ codel_time_t target;
+
+ u32 count; /* packets dropped since entering drop state */
+ u32 drop_count;
+ bool dropping;
+ /* time to declare above q->target (0 if below)*/
+ codel_time_t first_above_time;
+ codel_time_t drop_next; /* time to drop next packet */
+
+ u32 state1;
+ u32 state2;
+ u32 state3;
+ u32 states;
+ u32 drop_overlimit;
+};
+
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+
+/*
+ * return interval/sqrt(x) with good precision
+ */
+static u32 calc(u32 _interval, u32 _x)
+{
+ u64 interval = _interval;
+ unsigned long x = _x;
+
+ /* scale operands for max precision
+ * On 64bit arches, we can prescale x by 32bits
+ */
+ if (BITS_PER_LONG == 64) {
+ x <<= 32;
+ interval <<= 16;
+ }
+ while (x < (1UL << (BITS_PER_LONG - 2))) {
+ x <<= 2;
+ interval <<= 1;
+ }
+ do_div(interval, int_sqrt(x));
+ return (u32)interval;
+}
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static codel_time_t control_law(const struct codel_sched_data *q,
+ codel_time_t t)
+{
+ return t + calc(q->interval, q->count);
+}
+
+static bool should_drop(struct sk_buff *skb, struct Qdisc *sch,
+ codel_time_t now)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ codel_time_t sojourn_time;
+ bool drop;
+
+ if (!skb) {
+ q->first_above_time = 0;
+ return false;
+ }
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ sojourn_time = now - get_enqueue_time(skb);
+
+ if (codel_time_before(sojourn_time, q->target) ||
+ sch->qstats.backlog < q->minbytes) {
+ /* went below so we'll stay below for at least q->interval */
+ q->first_above_time = 0;
+ return false;
+ }
+ drop = false;
+ if (q->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least q->interval we'll say it's ok to drop
+ */
+ q->first_above_time = now + q->interval;
+ } else if (codel_time_after(now, q->first_above_time)) {
+ drop = true;
+ q->state1++;
+ }
+ return drop;
+}
+
+static void codel_drop(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_drop(skb, sch);
+ q->drop_count++;
+}
+
+/*
+* if min went above target close to when we last went below,
+* assume that some drop rate near that controlled the queue on the
+* last cycle is a good starting point to control it now.
+*
+* Since there is debate about it right now, we try a few
+* different methods.
+*/
+
+static u32 count_rescale(struct codel_sched_data *q, codel_time_t now) {
+ u32 c = 1;
+
+ if (q->count < 2)
+ return 1;
+
+ if (codel_time_after(now - q->drop_next, 16 * q->interval)) {
+ switch(decrease_method) {
+ case 3: /* Taht 1 (not yet what I have in mind) */
+ c = q->count - 1;
+ break;
+ case 2: /* Dumazet 2 */
+ c = q->count >> 1;
+ break;
+ case 1: /* Dumazet 1 */
+ c = min(q->count - 1,
+ q->count - (q->count >> 4));
+ break;
+ case 0: /* Codel Paper Default */
+ default:
+ c = q->count - 1;
+ }
+ c = max(1U, c);
+ }
+ return (u32) c;
+}
+
+static struct sk_buff *codel_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ q->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = should_drop(skb, sch, now);
+ if (q->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ q->dropping = false;
+ } else if (codel_time_after_eq(now, q->drop_next)) {
+ q->state2++;
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ if(gentle)
+ q->count++;
+ while (q->dropping &&
+ codel_time_after_eq(now, q->drop_next)) {
+ codel_drop(sch, skb);
+ if(!gentle)
+ q->count++;
+ skb = __skb_dequeue(&sch->q);
+ if (!should_drop(skb, sch, now)) {
+ /* leave dropping state */
+ q->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ q->drop_next =
+ control_law(q, q->drop_next);
+ }
+ }
+ }
+ } else if (drop &&
+ (codel_time_before(now - q->drop_next,
+ 16 * q->interval) ||
+ codel_time_after_eq(now - q->first_above_time,
+ 2 * q->interval))) {
+ codel_drop(sch, skb);
+ skb = __skb_dequeue(&sch->q);
+ drop = should_drop(skb, sch, now);
+ q->dropping = true;
+ q->state3++;
+ /*
+ * if min went above target close to when we last went below,
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ * Since there is debate about it right now, punt.
+ */
+ q->count = count_rescale(q, now);
+ q->drop_next = control_law(q, now);
+ }
+ q->states++;
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->drop_count);
+ q->drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(skb_queue_len(&sch->q) < sch->limit)) {
+ set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_MINBYTES] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_MINBYTES])
+ q->minbytes = nla_get_u32(tb[TCA_CODEL_MINBYTES]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ q->drop_next = q->first_above_time = 0;
+ q->dropping = false;
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ q->target = MS2TIME(5);
+ /* It should be possible to run with no limit,
+ * with infinite memory
+ */
+ sch->limit = DEFAULT_CODEL_LIMIT;
+ q->minbytes = psched_mtu(qdisc_dev(sch));
+ q->interval = MS2TIME(100);
+ q->drop_next = q->first_above_time = 0;
+ q->dropping = false; /* exit dropping state */
+ q->count = 1;
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CODEL_TARGET, codel_time_to_us(q->target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL, codel_time_to_us(q->interval)) ||
+ nla_put_u32(skb, TCA_CODEL_MINBYTES, q->minbytes))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = skb_peek(&sch->q);
+ codel_time_t now = codel_get_time();
+ struct tc_codel_xstats st = {
+ .count = q->count,
+ .state1 = q->state1,
+ .state2 = q->state2,
+ .state3 = q->state3,
+ .states = q->states,
+ .drop_overlimit = q->drop_overlimit,
+ .delay = skb ? now - get_enqueue_time(skb) : 0,
+ .drop_next = q->drop_next ? q->drop_next - now : 0,
+ .dropping = q->dropping,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ sch->q.qlen = 0;
+ q->dropping = false;
+ q->count = 1;
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_enqueue,
+ .dequeue = codel_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
--
1.7.9.5
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v9] codel: Controlled Delay AQM
2012-05-07 5:35 [Codel] [PATCH v9] codel: Controlled Delay AQM Dave Täht
@ 2012-05-07 5:50 ` Eric Dumazet
2012-05-07 5:52 ` Dave Taht
2012-05-07 5:51 ` Dave Taht
2012-05-07 13:57 ` [Codel] [PATCH v10] " Eric Dumazet
2 siblings, 1 reply; 19+ messages in thread
From: Eric Dumazet @ 2012-05-07 5:50 UTC (permalink / raw)
To: Dave Täht; +Cc: codel
On Sun, 2012-05-06 at 22:35 -0700, Dave Täht wrote:
> This version (9) adds support for various forms of decrease in s3,
> in the form of the module parameters gentle and decrease_method.
>
> It defaults to the algorithm as described in the original presentation.
>
> v1: Original implementation - Dave Taht
> v2: Working code - Corrections for ktime - Dave Taht
> v3: 32 bit support and port to net-next - Eric Dumazet
> v4: 16 bit precision for inv sqrt and cache - Dave Taht
> v5: Kernel cleanup and full precision - Eric Dumazet
> v6: Dump Stats support added - Eric Dumazet
> v7: Complete rewrite for u32 values - Eric Dumazet
> v8: Stats and timing added, 64 bit prescale improved - Eric Dumazet
> v9: debated functionality moved to isolated routine - Dave Taht
> ---
> +static u32 gentle = 0;
> +static u32 decrease_method = 0;
> +module_param(gentle, uint, 0644);
> +module_param(decrease_method, uint, 0644);
> +MODULE_PARM_DESC(gentle,"Gently increment count in massive drop state");
> +MODULE_PARM_DESC(decrease_method,"Various means of decreasing count");
Hey, that's absolutely forbidden.
Just add new TCA_CODEL_xxx knobs to control things with tc, for each
qdisc, and not globally on the machine.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v9] codel: Controlled Delay AQM
2012-05-07 5:35 [Codel] [PATCH v9] codel: Controlled Delay AQM Dave Täht
2012-05-07 5:50 ` Eric Dumazet
@ 2012-05-07 5:51 ` Dave Taht
2012-05-07 13:57 ` [Codel] [PATCH v10] " Eric Dumazet
2 siblings, 0 replies; 19+ messages in thread
From: Dave Taht @ 2012-05-07 5:51 UTC (permalink / raw)
To: Dave Täht; +Cc: codel
On Sun, May 6, 2012 at 10:35 PM, Dave Täht <dave.taht@bufferbloat.net> wrote:
> This version (9) adds support for various forms of decrease in s3,
> in the form of the module parameters gentle and decrease_method.
This one (or the one prior) appears to have introduced a wrap-around error.
I see count never get much above 3000, and drop to 1 a lot.
When that happens, delay goes boom. Prior to that it's good...
Over to eric....
>
> It defaults to the algorithm as described in the original presentation.
>
> v1: Original implementation - Dave Taht
> v2: Working code - Corrections for ktime - Dave Taht
> v3: 32 bit support and port to net-next - Eric Dumazet
> v4: 16 bit precision for inv sqrt and cache - Dave Taht
> v5: Kernel cleanup and full precision - Eric Dumazet
> v6: Dump Stats support added - Eric Dumazet
> v7: Complete rewrite for u32 values - Eric Dumazet
> v8: Stats and timing added, 64 bit prescale improved - Eric Dumazet
> v9: debated functionality moved to isolated routine - Dave Taht
> ---
> include/linux/pkt_sched.h | 25 +++
> net/sched/Kconfig | 11 ++
> net/sched/Makefile | 1 +
> net/sched/sch_codel.c | 463 +++++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 500 insertions(+)
> create mode 100644 net/sched/sch_codel.c
>
> diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
> index 0d5b793..f62141e 100644
> --- a/include/linux/pkt_sched.h
> +++ b/include/linux/pkt_sched.h
> @@ -633,4 +633,29 @@ struct tc_qfq_stats {
> __u32 lmax;
> };
>
> +/* CODEL */
> +
> +enum {
> + TCA_CODEL_UNSPEC,
> + TCA_CODEL_TARGET,
> + TCA_CODEL_LIMIT,
> + TCA_CODEL_MINBYTES,
> + TCA_CODEL_INTERVAL,
> + __TCA_CODEL_MAX
> +};
> +
> +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
> +
> +struct tc_codel_xstats {
> + __u32 count;
> + __u32 delay; /* time elapsed since next packet was queued (in us) */
> + __u32 drop_next;
> + __u32 drop_overlimit;
> + __u32 dropping;
> + __u32 state1;
> + __u32 state2;
> + __u32 state3;
> + __u32 states;
> +};
> +
> #endif
> diff --git a/net/sched/Kconfig b/net/sched/Kconfig
> index 2590e91..8106c42 100644
> --- a/net/sched/Kconfig
> +++ b/net/sched/Kconfig
> @@ -250,6 +250,17 @@ config NET_SCH_QFQ
>
> If unsure, say N.
>
> +config NET_SCH_CODEL
> + tristate "Controlled Delay AQM (CODEL)"
> + help
> + Say Y here if you want to use the Controlled Delay (CODEL)
> + packet scheduling algorithm.
> +
> + To compile this driver as a module, choose M here: the module
> + will be called sch_codel.
> +
> + If unsure, say N.
> +
> config NET_SCH_INGRESS
> tristate "Ingress Qdisc"
> depends on NET_CLS_ACT
> diff --git a/net/sched/Makefile b/net/sched/Makefile
> index dc5889c..41130b5 100644
> --- a/net/sched/Makefile
> +++ b/net/sched/Makefile
> @@ -36,6 +36,7 @@ obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
> obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
> obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
> obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
> +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
>
> obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
> obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
> diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
> new file mode 100644
> index 0000000..a9e6383
> --- /dev/null
> +++ b/net/sched/sch_codel.c
> @@ -0,0 +1,463 @@
> +/*
> + * net/sched/sch_codel.c A Codel implementation
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License
> + * as published by the Free Software Foundation; either version
> + * 2 of the License, or (at your option) any later version.
> + *
> + * Codel, the COntrolled DELay Queueing discipline
> + * Based on ns2 simulation code presented by Kathie Nichols
> + *
> + * Authors: Dave Täht <d@taht.net>
> + * Eric Dumazet <edumazet@google.com>
> + */
> +
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/errno.h>
> +#include <linux/ktime.h>
> +#include <linux/skbuff.h>
> +#include <net/pkt_sched.h>
> +
> +/*
> + * codel uses a 1024 nsec clock, encoded in u32
> + */
> +typedef u32 codel_time_t;
> +#define CODEL_SHIFT 10
> +
> +static u32 gentle = 0;
> +static u32 decrease_method = 0;
> +module_param(gentle, uint, 0644);
> +module_param(decrease_method, uint, 0644);
> +MODULE_PARM_DESC(gentle,"Gently increment count in massive drop state");
> +MODULE_PARM_DESC(decrease_method,"Various means of decreasing count");
> +
> +
> +static codel_time_t codel_get_time(void)
> +{
> + u64 ns = ktime_to_ns(ktime_get());
> +
> + return ns >> CODEL_SHIFT;
> +}
> +
> +#define codel_time_after(a, b) ((int)(a) - (int)(b) > 0)
> +#define codel_time_after_eq(a, b) ((int)(a) - (int)(b) >= 0)
> +#define codel_time_before(a, b) ((int)(a) - (int)(b) < 0)
> +#define codel_time_before_eq(a, b) ((int)(a) - (int)(b) <= 0)
> +
> +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
> +
> +#define DEFAULT_CODEL_LIMIT 1000
> +
> +/* Per-queue state (codel_queue_t instance variables) */
> +
> +struct codel_sched_data {
> + u32 minbytes;
> + u32 interval;
> + codel_time_t target;
> +
> + u32 count; /* packets dropped since entering drop state */
> + u32 drop_count;
> + bool dropping;
> + /* time to declare above q->target (0 if below)*/
> + codel_time_t first_above_time;
> + codel_time_t drop_next; /* time to drop next packet */
> +
> + u32 state1;
> + u32 state2;
> + u32 state3;
> + u32 states;
> + u32 drop_overlimit;
> +};
> +
> +struct codel_skb_cb {
> + codel_time_t enqueue_time;
> +};
> +
> +
> +/*
> + * return interval/sqrt(x) with good precision
> + */
> +static u32 calc(u32 _interval, u32 _x)
> +{
> + u64 interval = _interval;
> + unsigned long x = _x;
> +
> + /* scale operands for max precision
> + * On 64bit arches, we can prescale x by 32bits
> + */
> + if (BITS_PER_LONG == 64) {
> + x <<= 32;
> + interval <<= 16;
> + }
> + while (x < (1UL << (BITS_PER_LONG - 2))) {
> + x <<= 2;
> + interval <<= 1;
> + }
> + do_div(interval, int_sqrt(x));
> + return (u32)interval;
> +}
> +
> +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
> +{
> + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
> + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
> +}
> +
> +static codel_time_t get_enqueue_time(const struct sk_buff *skb)
> +{
> + return get_codel_cb(skb)->enqueue_time;
> +}
> +
> +static void set_enqueue_time(struct sk_buff *skb)
> +{
> + get_codel_cb(skb)->enqueue_time = codel_get_time();
> +}
> +
> +static codel_time_t control_law(const struct codel_sched_data *q,
> + codel_time_t t)
> +{
> + return t + calc(q->interval, q->count);
> +}
> +
> +static bool should_drop(struct sk_buff *skb, struct Qdisc *sch,
> + codel_time_t now)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + codel_time_t sojourn_time;
> + bool drop;
> +
> + if (!skb) {
> + q->first_above_time = 0;
> + return false;
> + }
> + sch->qstats.backlog -= qdisc_pkt_len(skb);
> + sojourn_time = now - get_enqueue_time(skb);
> +
> + if (codel_time_before(sojourn_time, q->target) ||
> + sch->qstats.backlog < q->minbytes) {
> + /* went below so we'll stay below for at least q->interval */
> + q->first_above_time = 0;
> + return false;
> + }
> + drop = false;
> + if (q->first_above_time == 0) {
> + /* just went above from below. If we stay above
> + * for at least q->interval we'll say it's ok to drop
> + */
> + q->first_above_time = now + q->interval;
> + } else if (codel_time_after(now, q->first_above_time)) {
> + drop = true;
> + q->state1++;
> + }
> + return drop;
> +}
> +
> +static void codel_drop(struct Qdisc *sch, struct sk_buff *skb)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> +
> + qdisc_drop(skb, sch);
> + q->drop_count++;
> +}
> +
> +/*
> +* if min went above target close to when we last went below,
> +* assume that some drop rate near that controlled the queue on the
> +* last cycle is a good starting point to control it now.
> +*
> +* Since there is debate about it right now, we try a few
> +* different methods.
> +*/
> +
> +static u32 count_rescale(struct codel_sched_data *q, codel_time_t now) {
> + u32 c = 1;
> +
> + if (q->count < 2)
> + return 1;
> +
> + if (codel_time_after(now - q->drop_next, 16 * q->interval)) {
> + switch(decrease_method) {
> + case 3: /* Taht 1 (not yet what I have in mind) */
> + c = q->count - 1;
> + break;
> + case 2: /* Dumazet 2 */
> + c = q->count >> 1;
> + break;
> + case 1: /* Dumazet 1 */
> + c = min(q->count - 1,
> + q->count - (q->count >> 4));
> + break;
> + case 0: /* Codel Paper Default */
> + default:
> + c = q->count - 1;
> + }
> + c = max(1U, c);
> + }
> + return (u32) c;
> +}
> +
> +static struct sk_buff *codel_dequeue(struct Qdisc *sch)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct sk_buff *skb = __skb_dequeue(&sch->q);
> + codel_time_t now;
> + bool drop;
> +
> + if (!skb) {
> + q->dropping = false;
> + return skb;
> + }
> + now = codel_get_time();
> + drop = should_drop(skb, sch, now);
> + if (q->dropping) {
> + if (!drop) {
> + /* sojourn time below target - leave dropping state */
> + q->dropping = false;
> + } else if (codel_time_after_eq(now, q->drop_next)) {
> + q->state2++;
> + /* It's time for the next drop. Drop the current
> + * packet and dequeue the next. The dequeue might
> + * take us out of dropping state.
> + * If not, schedule the next drop.
> + * A large backlog might result in drop rates so high
> + * that the next drop should happen now,
> + * hence the while loop.
> + */
> + if(gentle)
> + q->count++;
> + while (q->dropping &&
> + codel_time_after_eq(now, q->drop_next)) {
> + codel_drop(sch, skb);
> + if(!gentle)
> + q->count++;
> + skb = __skb_dequeue(&sch->q);
> + if (!should_drop(skb, sch, now)) {
> + /* leave dropping state */
> + q->dropping = false;
> + } else {
> + /* and schedule the next drop */
> + q->drop_next =
> + control_law(q, q->drop_next);
> + }
> + }
> + }
> + } else if (drop &&
> + (codel_time_before(now - q->drop_next,
> + 16 * q->interval) ||
> + codel_time_after_eq(now - q->first_above_time,
> + 2 * q->interval))) {
> + codel_drop(sch, skb);
> + skb = __skb_dequeue(&sch->q);
> + drop = should_drop(skb, sch, now);
> + q->dropping = true;
> + q->state3++;
> + /*
> + * if min went above target close to when we last went below,
> + * assume that the drop rate that controlled the queue on the
> + * last cycle is a good starting point to control it now.
> + * Since there is debate about it right now, punt.
> + */
> + q->count = count_rescale(q, now);
> + q->drop_next = control_law(q, now);
> + }
> + q->states++;
> + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
> + * or HTB crashes. Defer it for next round.
> + */
> + if (q->drop_count && sch->q.qlen) {
> + qdisc_tree_decrease_qlen(sch, q->drop_count);
> + q->drop_count = 0;
> + }
> + if (skb)
> + qdisc_bstats_update(sch, skb);
> + return skb;
> +}
> +
> +static int codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
> +{
> + struct codel_sched_data *q;
> +
> + if (likely(skb_queue_len(&sch->q) < sch->limit)) {
> + set_enqueue_time(skb);
> + return qdisc_enqueue_tail(skb, sch);
> + }
> + q = qdisc_priv(sch);
> + q->drop_overlimit++;
> + return qdisc_drop(skb, sch);
> +}
> +
> +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
> + [TCA_CODEL_TARGET] = { .type = NLA_U32 },
> + [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
> + [TCA_CODEL_MINBYTES] = { .type = NLA_U32 },
> + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
> +};
> +
> +static int codel_change(struct Qdisc *sch, struct nlattr *opt)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct nlattr *tb[TCA_CODEL_MAX + 1];
> + unsigned int qlen;
> + int err;
> +
> + if (!opt)
> + return -EINVAL;
> +
> + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
> + if (err < 0)
> + return err;
> +
> + sch_tree_lock(sch);
> + if (tb[TCA_CODEL_TARGET]) {
> + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
> +
> + q->target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
> + }
> + if (tb[TCA_CODEL_INTERVAL]) {
> + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
> +
> + q->interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
> + }
> + if (tb[TCA_CODEL_LIMIT])
> + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
> +
> + if (tb[TCA_CODEL_MINBYTES])
> + q->minbytes = nla_get_u32(tb[TCA_CODEL_MINBYTES]);
> +
> + qlen = sch->q.qlen;
> + while (sch->q.qlen > sch->limit) {
> + struct sk_buff *skb = __skb_dequeue(&sch->q);
> +
> + sch->qstats.backlog -= qdisc_pkt_len(skb);
> + qdisc_drop(skb, sch);
> + }
> + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
> +
> + q->drop_next = q->first_above_time = 0;
> + q->dropping = false;
> + sch_tree_unlock(sch);
> + return 0;
> +}
> +
> +static int codel_init(struct Qdisc *sch, struct nlattr *opt)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> +
> + q->target = MS2TIME(5);
> + /* It should be possible to run with no limit,
> + * with infinite memory
> + */
> + sch->limit = DEFAULT_CODEL_LIMIT;
> + q->minbytes = psched_mtu(qdisc_dev(sch));
> + q->interval = MS2TIME(100);
> + q->drop_next = q->first_above_time = 0;
> + q->dropping = false; /* exit dropping state */
> + q->count = 1;
> + if (opt) {
> + int err = codel_change(sch, opt);
> +
> + if (err)
> + return err;
> + }
> +
> + if (sch->limit >= 1)
> + sch->flags |= TCQ_F_CAN_BYPASS;
> + else
> + sch->flags &= ~TCQ_F_CAN_BYPASS;
> +
> + return 0;
> +}
> +
> +static u32 codel_time_to_us(codel_time_t val)
> +{
> + u64 valns = ((u64)val << CODEL_SHIFT);
> +
> + do_div(valns, NSEC_PER_USEC);
> + return (u32)valns;
> +}
> +
> +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct nlattr *opts;
> +
> + opts = nla_nest_start(skb, TCA_OPTIONS);
> + if (opts == NULL)
> + goto nla_put_failure;
> + if (nla_put_u32(skb, TCA_CODEL_TARGET, codel_time_to_us(q->target)) ||
> + nla_put_u32(skb, TCA_CODEL_LIMIT, sch->limit) ||
> + nla_put_u32(skb, TCA_CODEL_INTERVAL, codel_time_to_us(q->interval)) ||
> + nla_put_u32(skb, TCA_CODEL_MINBYTES, q->minbytes))
> + goto nla_put_failure;
> +
> + return nla_nest_end(skb, opts);
> +
> +nla_put_failure:
> + nla_nest_cancel(skb, opts);
> + return -1;
> +}
> +
> +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct sk_buff *skb = skb_peek(&sch->q);
> + codel_time_t now = codel_get_time();
> + struct tc_codel_xstats st = {
> + .count = q->count,
> + .state1 = q->state1,
> + .state2 = q->state2,
> + .state3 = q->state3,
> + .states = q->states,
> + .drop_overlimit = q->drop_overlimit,
> + .delay = skb ? now - get_enqueue_time(skb) : 0,
> + .drop_next = q->drop_next ? q->drop_next - now : 0,
> + .dropping = q->dropping,
> + };
> +
> + return gnet_stats_copy_app(d, &st, sizeof(st));
> +}
> +
> +static void codel_reset(struct Qdisc *sch)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> +
> + qdisc_reset_queue(sch);
> + sch->q.qlen = 0;
> + q->dropping = false;
> + q->count = 1;
> +}
> +
> +static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
> + .id = "codel",
> + .priv_size = sizeof(struct codel_sched_data),
> +
> + .enqueue = codel_enqueue,
> + .dequeue = codel_dequeue,
> + .peek = qdisc_peek_dequeued,
> + .init = codel_init,
> + .reset = codel_reset,
> + .change = codel_change,
> + .dump = codel_dump,
> + .dump_stats = codel_dump_stats,
> + .owner = THIS_MODULE,
> +};
> +
> +static int __init codel_module_init(void)
> +{
> + return register_qdisc(&codel_qdisc_ops);
> +}
> +static void __exit codel_module_exit(void)
> +{
> + unregister_qdisc(&codel_qdisc_ops);
> +}
> +module_init(codel_module_init)
> +module_exit(codel_module_exit)
> +
> +MODULE_DESCRIPTION("Controlled Delay queue discipline");
> +MODULE_AUTHOR("Dave Taht");
> +MODULE_AUTHOR("Eric Dumazet");
> +MODULE_LICENSE("GPL");
> --
> 1.7.9.5
>
>
> _______________________________________________
> Codel mailing list
> Codel@lists.bufferbloat.net
> https://lists.bufferbloat.net/listinfo/codel
>
--
Dave Täht
SKYPE: davetaht
US Tel: 1-239-829-5608
http://www.bufferbloat.net
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v9] codel: Controlled Delay AQM
2012-05-07 5:50 ` Eric Dumazet
@ 2012-05-07 5:52 ` Dave Taht
0 siblings, 0 replies; 19+ messages in thread
From: Dave Taht @ 2012-05-07 5:52 UTC (permalink / raw)
To: Eric Dumazet; +Cc: codel, Dave Täht
On Sun, May 6, 2012 at 10:50 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Sun, 2012-05-06 at 22:35 -0700, Dave Täht wrote:
>> This version (9) adds support for various forms of decrease in s3,
>> in the form of the module parameters gentle and decrease_method.
>>
>> It defaults to the algorithm as described in the original presentation.
>>
>> v1: Original implementation - Dave Taht
>> v2: Working code - Corrections for ktime - Dave Taht
>> v3: 32 bit support and port to net-next - Eric Dumazet
>> v4: 16 bit precision for inv sqrt and cache - Dave Taht
>> v5: Kernel cleanup and full precision - Eric Dumazet
>> v6: Dump Stats support added - Eric Dumazet
>> v7: Complete rewrite for u32 values - Eric Dumazet
>> v8: Stats and timing added, 64 bit prescale improved - Eric Dumazet
>> v9: debated functionality moved to isolated routine - Dave Taht
>> ---
>
>> +static u32 gentle = 0;
>> +static u32 decrease_method = 0;
>> +module_param(gentle, uint, 0644);
>> +module_param(decrease_method, uint, 0644);
>> +MODULE_PARM_DESC(gentle,"Gently increment count in massive drop state");
>> +MODULE_PARM_DESC(decrease_method,"Various means of decreasing count");
>
> Hey, thats absolutely forbidden.
>
> Just add new TCA_CODEL_xxx knobs to control things with tc, for each
> qdisc, and not globally on the machine.
>
I regard this addition as a debugging tool, not API functionality, at least
at present...
>
>
> _______________________________________________
> Codel mailing list
> Codel@lists.bufferbloat.net
> https://lists.bufferbloat.net/listinfo/codel
--
Dave Täht
SKYPE: davetaht
US Tel: 1-239-829-5608
http://www.bufferbloat.net
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Codel] [PATCH v10] codel: Controlled Delay AQM
2012-05-07 5:35 [Codel] [PATCH v9] codel: Controlled Delay AQM Dave Täht
2012-05-07 5:50 ` Eric Dumazet
2012-05-07 5:51 ` Dave Taht
@ 2012-05-07 13:57 ` Eric Dumazet
2012-05-07 16:07 ` [Codel] [PATCH v1 ] sfq: add a Controlled Delay option Eric Dumazet
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
2 siblings, 2 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-07 13:57 UTC (permalink / raw)
To: Dave Täht; +Cc: codel
On Sun, 2012-05-06 at 22:35 -0700, Dave Täht wrote:
> This version (9) adds support for various forms of decrease in s3,
> in the form of the module parameters gentle and decrease_method.
>
> It defaults to the algorithm as described in the original presentation.
>
> v1: Original implementation - Dave Taht
> v2: Working code - Corrections for ktime - Dave Taht
> v3: 32 bit support and port to net-next - Eric Dumazet
> v4: 16 bit precision for inv sqrt and cache - Dave Taht
> v5: Kernel cleanup and full precision - Eric Dumazet
> v6: Dump Stats support added - Eric Dumazet
> v7: Complete rewrite for u32 values - Eric Dumazet
> v8: Stats and timing added, 64 bit prescale improved - Eric Dumazet
> v9: debated functionality moved to isolated routine - Dave Taht
Please find v10:
- ECN support.
- refactorize code to let codel be plugged in SFQ (or other qdiscs)
with minimal memory costs (separate data into three subsets : params,
vars, stats). Each Qdisc provides its own 'dequeue packet from raw
queue', since it might be specific (in SFQ we have of course one
separate queue per flow, and it has to update 2 backlogs, not only one)
- add codel_ prefixes to cleanup code.
- Qdisc wanting to plug codel must include the room for skb timestamp
in their private skb->cb[] (if they use/have one)
I plan to add codel to SFQ in a very near future (so that you can
optionally select RED or Codel for SFQ flows)
About the choice of q->count decrease, I think it's better to wait for
Kathleen & Van input on this subject.
include/linux/pkt_sched.h | 27 +++
include/net/codel.h | 275 ++++++++++++++++++++++++++++++++++++
net/sched/Kconfig | 11 +
net/sched/Makefile | 1
net/sched/sch_codel.c | 252 ++++++++++++++++++++++++++++++++
5 files changed, 566 insertions(+)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index ffe975c..45a1abe 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -655,4 +655,31 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_MINBYTES,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 count;
+ __u32 delay; /* time elapsed since next packet was queued (in us) */
+ __u32 drop_next;
+ __u32 drop_overlimit;
+ __u32 ecn_mark;
+ __u32 dropping;
+ __u32 state1;
+ __u32 state2;
+ __u32 state3;
+ __u32 states;
+};
+
#endif
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 0000000..aed7ee9
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,275 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/ktime.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (Codel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ */
+
+
+/*
+ * codel uses a 1024 nsec clock, encoded in u32
+ */
+typedef u32 codel_time_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b) ((int)(a) - (int)(b) > 0)
+#define codel_time_after_eq(a, b) ((int)(a) - (int)(b) >= 0)
+#define codel_time_before(a, b) ((int)(a) - (int)(b) < 0)
+#define codel_time_before_eq(a, b) ((int)(a) - (int)(b) <= 0)
+
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+struct codel_params {
+ u32 minbytes; /* 1500, or interface MTU */
+ codel_time_t interval; /* MS2TIME(100) */
+ codel_time_t target; /* MS2TIME(5) */
+ bool ecn; /* is ECN enabled */
+};
+
+struct codel_vars {
+ u32 count; /* packets dropped since we went into drop state */
+ bool dropping;
+ /* time to declare above q->target (0 if below)*/
+ codel_time_t first_above_time;
+ codel_time_t drop_next; /* time to drop next packet */
+};
+
+/* contains stats and some shared info */
+struct codel_stats {
+ struct Qdisc *sch;
+ u32 drop_count; /* temp count of dropped packets in dequeue() */
+
+ u32 ecn_mark;
+ u32 state1;
+ u32 state2;
+ u32 state3;
+ u32 states;
+};
+
+static void codel_params_init(struct codel_params *params,
+ const struct Qdisc *sch)
+{
+ params->minbytes = psched_mtu(qdisc_dev(sch));
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ vars->drop_next = 0;
+ vars->first_above_time = 0;
+ vars->dropping = false; /* exit dropping state */
+ vars->count = 1;
+}
+
+static void codel_stats_init(struct codel_stats *stats,
+ struct Qdisc *sch)
+{
+ stats->sch = sch; /* back pointer for qdisc_drop() calls */
+}
+
+/* return interval/sqrt(x) with good precision */
+static u32 codel_inv_sqrt(u32 _interval, u32 _x)
+{
+ u64 interval = _interval;
+ unsigned long x = _x;
+
+ /* Scale operands for max precision.
+ * On 64bit arches, we can prescale x by 32bits
+ */
+ if (BITS_PER_LONG == 64) {
+ x <<= 32;
+ interval <<= 16;
+ }
+ while (x < (1UL << (BITS_PER_LONG - 2))) {
+ x <<= 2;
+ interval <<= 1;
+ }
+ do_div(interval, int_sqrt(x));
+ return (u32)interval;
+}
+
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 count)
+{
+ return t + codel_inv_sqrt(interval, count);
+}
+
+
+static bool codel_should_drop(struct sk_buff *skb,
+ unsigned int *backlog,
+ struct codel_vars *vars,
+ const struct codel_params *params,
+ struct codel_stats *stats,
+ codel_time_t now)
+{
+ codel_time_t sojourn_time;
+ bool drop;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ sojourn_time = now - codel_get_enqueue_time(skb);
+ *backlog -= qdisc_pkt_len(skb);
+
+ if (codel_time_before(sojourn_time, params->target) ||
+ *backlog < params->minbytes) {
+ /* went below so we'll stay below for at least q->interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least q->interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ drop = true;
+ stats->state1++;
+ }
+ return drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars);
+
+static struct sk_buff *codel_dequeue(const struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_dequeue_t dequeue_func,
+ u32 *backlog)
+{
+ struct sk_buff *skb = dequeue_func(vars);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, backlog,
+ vars, params, stats,
+ now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ stats->state2++;
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ vars->count++;
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ goto end;
+ }
+ qdisc_drop(skb, stats->sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars);
+ if (!codel_should_drop(skb, backlog,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ }
+ }
+ }
+ } else if (drop &&
+ (codel_time_before(now - vars->drop_next,
+ 16 * params->interval) ||
+ codel_time_after_eq(now - vars->first_above_time,
+ 2 * params->interval))) {
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ qdisc_drop(skb, stats->sch);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars);
+ drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+ }
+ vars->dropping = true;
+ stats->state3++;
+ /*
+ * if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ if (codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+// u32 c = min(q->count - 1, q->count - (q->count >> 4));
+ u32 c = vars->count - 1;
+ vars->count = max(1U, c);
+ } else {
+ vars->count = 1;
+ }
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->count);
+ }
+end:
+ stats->states++;
+ return skb;
+}
+#endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f8..fadd252 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,17 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2..30fab03 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 0000000..fa36dd2
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,252 @@
+/*
+ * net/sched/sch_codel.c A Codel implementation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Codel, the COntrolled DELay Queueing discipline
+ * Based on ns2 simulation code presented by Kathie Nichols
+ *
+ * Authors: Dave Täht <d@taht.net>
+ * Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars)
+{
+ struct codel_sched_data *q;
+ struct Qdisc *sch;
+
+ q = container_of(vars, struct codel_sched_data, vars);
+ sch = (struct Qdisc *)((void *)q - QDISC_ALIGN(sizeof(struct Qdisc)));
+ return __skb_dequeue(&sch->q);
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(&q->params, &q->vars, &q->stats,
+ dequeue, &sch->qstats.backlog);
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_MINBYTES] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_MINBYTES])
+ q->params.minbytes = nla_get_u32(tb[TCA_CODEL_MINBYTES]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+// q->drop_next = q->first_above_time = 0;
+// q->dropping = false;
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ /* It should be possible to run with no limit,
+ * with infinite memory :)
+ */
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params, sch);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats, sch);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_MINBYTES,
+ q->params.minbytes) ||
+ nla_put_u32(skb, TCA_CODEL_ECN, q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = skb_peek(&sch->q);
+ codel_time_t now = codel_get_time();
+ struct tc_codel_xstats st = {
+ .count = q->vars.count,
+ .state1 = q->stats.state1,
+ .state2 = q->stats.state2,
+ .state3 = q->stats.state3,
+ .states = q->stats.states,
+ .drop_overlimit = q->drop_overlimit,
+ .delay = skb ? now - codel_get_enqueue_time(skb) : 0,
+ .drop_next = (q->vars.dropping && q->vars.drop_next) ?
+ q->vars.drop_next - now : 0,
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ sch->q.qlen = 0;
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Codel] [PATCH v1 ] sfq: add a Controlled Delay option
2012-05-07 13:57 ` [Codel] [PATCH v10] " Eric Dumazet
@ 2012-05-07 16:07 ` Eric Dumazet
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
1 sibling, 0 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-07 16:07 UTC (permalink / raw)
To: Dave Täht; +Cc: codel
On Mon, 2012-05-07 at 15:57 +0200, Eric Dumazet wrote:
> I plan to add codel to SFQ in a very near future (so that you can
> optionally select RED or Codel for SFQ flows)
Quick and dirty patch, to check if it's sane or not.
(dirty because you don't need a new tc binary; this just enables codel by
default, with ECN — cf. the //FIXME comments)
I am pleased it actually works, with no extra memory need.
Some small changes are needed on codel, so I'll send a V11 to clean the
thing.
include/net/codel.h | 9 +--
net/sched/sch_codel.c | 7 --
net/sched/sch_sfq.c | 117 +++++++++++++++++++++++++++++++++-------
3 files changed, 104 insertions(+), 29 deletions(-)
diff --git a/include/net/codel.h b/include/net/codel.h
index aed7ee9..57aceb8 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -175,7 +175,8 @@ static bool codel_should_drop(struct sk_buff *skb,
return drop;
}
-typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars);
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ struct Qdisc *sch);
static struct sk_buff *codel_dequeue(const struct codel_params *params,
struct codel_vars *vars,
@@ -183,7 +184,7 @@ static struct sk_buff *codel_dequeue(const struct codel_params *params,
codel_skb_dequeue_t dequeue_func,
u32 *backlog)
{
- struct sk_buff *skb = dequeue_func(vars);
+ struct sk_buff *skb = dequeue_func(vars, stats->sch);
codel_time_t now;
bool drop;
@@ -222,7 +223,7 @@ static struct sk_buff *codel_dequeue(const struct codel_params *params,
}
qdisc_drop(skb, stats->sch);
stats->drop_count++;
- skb = dequeue_func(vars);
+ skb = dequeue_func(vars, stats->sch);
if (!codel_should_drop(skb, backlog,
vars, params, stats, now)) {
/* leave dropping state */
@@ -247,7 +248,7 @@ static struct sk_buff *codel_dequeue(const struct codel_params *params,
qdisc_drop(skb, stats->sch);
stats->drop_count++;
- skb = dequeue_func(vars);
+ skb = dequeue_func(vars, stats->sch);
drop = codel_should_drop(skb, backlog, vars, params, stats, now);
}
vars->dropping = true;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index fa36dd2..c7d7fdc 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -35,13 +35,8 @@ struct codel_sched_data {
/* This is the specific function called from codel_dequeue()
* to dequeue a packet from queue.
*/
-static struct sk_buff *dequeue(struct codel_vars *vars)
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
- struct codel_sched_data *q;
- struct Qdisc *sch;
-
- q = container_of(vars, struct codel_sched_data, vars);
- sch = (struct Qdisc *)((void *)q - QDISC_ALIGN(sizeof(struct Qdisc)));
return __skb_dequeue(&sch->q);
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8a99179..d48722c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -25,6 +25,7 @@
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/red.h>
+#include <net/codel.h>
/* Stochastic Fairness Queuing algorithm.
@@ -111,7 +112,10 @@ struct sfq_slot {
short allot; /* credit for this slot */
unsigned int backlog;
- struct red_vars vars;
+ union {
+ struct red_vars rvars;
+ struct codel_vars cvars;
+ };
};
struct sfq_sched_data {
@@ -124,6 +128,7 @@ struct sfq_sched_data {
u32 perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
+ bool codel;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto *filter_list;
sfq_index *ht; /* Hash table ('divisor' slots) */
@@ -132,7 +137,8 @@ struct sfq_sched_data {
struct red_parms *red_parms;
struct tc_sfqred_stats stats;
struct sfq_slot *tail; /* current slot in round */
-
+ struct codel_params cparams;
+ struct codel_stats cstats;
struct sfq_head dep[SFQ_MAX_DEPTH + 1];
/* Linked lists of slots, indexed by depth
* dep[0] : list of unused flows
@@ -161,7 +167,8 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
* q->perturbation, we store flow_keys in skb->cb[]
*/
struct sfq_skb_cb {
- struct flow_keys keys;
+ codel_time_t enqueue_time; /* MUST be first field */
+ struct flow_keys keys;
};
static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
@@ -350,7 +357,7 @@ drop:
}
/* Is ECN parameter configured */
-static int sfq_prob_mark(const struct sfq_sched_data *q)
+static bool sfq_prob_mark(const struct sfq_sched_data *q)
{
return q->flags & TC_RED_ECN;
}
@@ -396,16 +403,19 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot = &q->slots[x];
slot->hash = hash;
slot->backlog = 0; /* should already be 0 anyway... */
- red_set_vars(&slot->vars);
+ if (q->codel)
+ codel_vars_init(&slot->cvars);
+ else
+ red_set_vars(&slot->rvars);
goto enqueue;
}
if (q->red_parms) {
- slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
- &slot->vars,
+ slot->rvars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
+ &slot->rvars,
slot->backlog);
switch (red_action(q->red_parms,
- &slot->vars,
- slot->vars.qavg)) {
+ &slot->rvars,
+ slot->rvars.qavg)) {
case RED_DONT_MARK:
break;
@@ -462,6 +472,8 @@ congestion_drop:
}
enqueue:
+ if (q->codel)
+ codel_set_enqueue_time(skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
slot->backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb);
@@ -497,6 +509,27 @@ enqueue:
return NET_XMIT_SUCCESS;
}
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue.
+ * codel already handles slot->backlog changes
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ struct sfq_slot *slot;
+
+ slot = container_of(vars, struct sfq_slot, cvars);
+
+ skb = slot_dequeue_head(slot);
+ sfq_dec(q, slot - q->slots);
+// slot->backlog -= qdisc_pkt_len(skb);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ return skb;
+}
+
+
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
@@ -517,12 +550,28 @@ next_slot:
slot->allot += q->scaled_quantum;
goto next_slot;
}
- skb = slot_dequeue_head(slot);
- sfq_dec(q, a);
+ if (q->codel) {
+ skb = codel_dequeue(&q->cparams, &slot->cvars, &q->cstats,
+ dequeue, &slot->backlog);
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->cstats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ q->cstats.drop_count = 0;
+ }
+ if (!skb) {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+ } else {
+ skb = slot_dequeue_head(slot);
+ sfq_dec(q, a);
+ slot->backlog -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ }
qdisc_bstats_update(sch, skb);
- sch->q.qlen--;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
- slot->backlog -= qdisc_pkt_len(skb);
/* Is the slot empty? */
if (slot->qlen == 0) {
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
@@ -574,7 +623,10 @@ static void sfq_rehash(struct Qdisc *sch)
__skb_queue_tail(&list, skb);
}
slot->backlog = 0;
- red_set_vars(&slot->vars);
+ if (q->codel)
+ codel_vars_init(&slot->cvars);
+ else
+ red_set_vars(&slot->rvars);
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
}
q->tail = NULL;
@@ -600,8 +652,8 @@ drop: sch->qstats.backlog -= qdisc_pkt_len(skb);
goto drop;
slot_queue_add(slot, skb);
if (q->red_parms)
- slot->vars.qavg = red_calc_qavg(q->red_parms,
- &slot->vars,
+ slot->rvars.qavg = red_calc_qavg(q->red_parms,
+ &slot->rvars,
slot->backlog);
slot->backlog += qdisc_pkt_len(skb);
sfq_inc(q, x);
@@ -636,17 +688,27 @@ static void sfq_perturbation(unsigned long arg)
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
+struct tc_sfq_qopt_v2 {
+ struct tc_sfq_qopt_v1 v1;
+ __u32 target;
+ __u32 interval;
+ __u32 minbytes;
+};
+
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
+ struct tc_sfq_qopt_v2 *ctl_v2 = NULL;
unsigned int qlen;
struct red_parms *p = NULL;
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
- if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
+ if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v2)))
+ ctl_v2 = nla_data(opt);
+ else if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
ctl_v1 = nla_data(opt);
if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
@@ -668,7 +730,21 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
q->divisor = ctl->divisor;
q->maxflows = min_t(u32, q->maxflows, q->divisor);
}
- if (ctl_v1) {
+ q->codel = true; // FIXME
+ q->cparams.ecn = true; // FIXME
+ if (ctl_v2) {
+ q->codel = true;
+ if (ctl_v2->target)
+ q->cparams.target = ((u64)ctl_v2->target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ if (ctl_v2->interval)
+ q->cparams.interval = ((u64)ctl_v2->interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ if (ctl_v2->minbytes)
+ q->cparams.minbytes = ctl_v2->minbytes;
+ q->flags = ctl_v2->v1.flags;
+ q->cparams.ecn = sfq_prob_mark(q);
+ q->headdrop = ctl_v2->v1.headdrop;
+ }
+ if (ctl_v1 && !q->codel) {
if (ctl_v1->depth)
q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
if (p) {
@@ -758,6 +834,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
q->perturbation = net_random();
+ codel_params_init(&q->cparams, sch);
+ codel_stats_init(&q->cstats, sch);
if (opt) {
int err = sfq_change(sch, opt);
@@ -810,6 +888,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.max_P = p->max_P;
}
memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
+ opt.stats.prob_mark += q->cstats.ecn_mark;
opt.flags = q->flags;
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Codel] [PATCH v12] codel: Controlled Delay AQM
2012-05-07 13:57 ` [Codel] [PATCH v10] " Eric Dumazet
2012-05-07 16:07 ` [Codel] [PATCH v1 ] sfq: add a Controlled Delay option Eric Dumazet
@ 2012-05-09 13:50 ` Eric Dumazet
2012-05-09 13:54 ` [Codel] [PATCH v12 iproute2] " Eric Dumazet
` (3 more replies)
1 sibling, 4 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-09 13:50 UTC (permalink / raw)
To: Dave Täht, Kathleen Nichols, Van Jacobson; +Cc: codel, bloat
From: Eric Dumazet <edumazet@google.com>
An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
http://queue.acm.org/detail.cfm?id=2209336
Based on initial work from Dave Taht.
Refactored to help future codel inclusion as plugin for other linux
qdisc (SFQ), like done with RED plugin.
Tested up to 10Gb speeds with no particular problems.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
---
v12: algo changes after Kathleen & Van last updates
- introduction of lastcount.
- minbytes renamed to maxpacket.
- maxpacket automatically learns biggest packet size.
- ldelay record sojourn time of last dequeued packet.
- various changes, better comments...
I hope this is the last version before upstream submission (netdev)
I'll send the iproute2 patch as well.
include/linux/pkt_sched.h | 28 +++
include/net/codel.h | 325 ++++++++++++++++++++++++++++++++++++
net/sched/Kconfig | 11 +
net/sched/Makefile | 1
net/sched/sch_codel.c | 272 ++++++++++++++++++++++++++++++
5 files changed, 637 insertions(+)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index ffe975c..453dd2c 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -655,4 +655,32 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count;
+ __u32 lastcount;
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __u32 drop_next;
+ __u32 drop_overlimit;
+ __u32 ecn_mark;
+ __u32 dropping;
+ __u32 state1;
+ __u32 state2;
+ __u32 state3;
+ __u32 states;
+};
+
#endif
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 0000000..565c1fe
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,325 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/ktime.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+
+/*
+ * CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
+#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
+#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
+#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
+
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+struct codel_params {
+ u32 maxpacket; /* largest packet we've seen so far */
+ codel_time_t target; /* target queue size (in time units) */
+ codel_time_t interval; /* width of moving time window */
+ bool ecn; /* is ECN enabled */
+};
+
+struct codel_vars {
+ u32 count; /* how many drops we've done since the last time
+ * we entered dropping state
+ */
+ u32 lastcount; /* count at entry to dropping state */
+ bool dropping; /* set to true if in dropping state */
+
+ codel_time_t first_above_time; /* when we went (or will go) continuously
+ * above target for interval
+ */
+ codel_time_t drop_next; /* time to drop next packet, or when we dropped last */
+ codel_time_t ldelay; /* sojourn time of last dequeued packet */
+};
+
+/* contains stats and some shared info */
+struct codel_stats {
+ struct Qdisc *sch;
+ u32 drop_count; /* temp count of dropped packets in dequeue() */
+
+ u32 ecn_mark; /* number of packets we ECN marked instead of dropping */
+ u32 states; /* number of codel_dequeue() calls */
+ u32 state1; /* number of times ok_to_drop was set to true */
+ u32 state2;
+ u32 state3;
+};
+
+static void codel_params_init(struct codel_params *params,
+ const struct Qdisc *sch)
+{
+ params->maxpacket = 256;
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ vars->drop_next = 0;
+ vars->first_above_time = 0;
+ vars->dropping = false; /* exit dropping state */
+ vars->count = 0;
+ vars->lastcount = 0;
+}
+
+static void codel_stats_init(struct codel_stats *stats,
+ struct Qdisc *sch)
+{
+ stats->sch = sch; /* back pointer for qdisc_drop() calls */
+}
+
+/* return interval/sqrt(x) with good precision */
+static u32 codel_inv_sqrt(u32 _interval, u32 _x)
+{
+ u64 interval = _interval;
+ unsigned long x = _x;
+
+ /* Scale operands for max precision */
+
+#if BITS_PER_LONG == 64
+ x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
+ interval <<= 16;
+#endif
+
+ while (x < (1UL << (BITS_PER_LONG - 2))) {
+ x <<= 2;
+ interval <<= 1;
+ }
+ do_div(interval, int_sqrt(x));
+ return (u32)interval;
+}
+
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 count)
+{
+ return t + codel_inv_sqrt(interval, count);
+}
+
+
+static bool codel_should_drop(struct sk_buff *skb,
+ unsigned int *backlog,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+ *backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > params->maxpacket))
+ params->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= params->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ stats->state1++;
+ }
+ return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ struct Qdisc *sch);
+
+static struct sk_buff *codel_dequeue(struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_dequeue_t dequeue_func,
+ u32 *backlog)
+{
+ struct sk_buff *skb = dequeue_func(vars, stats->sch);
+ codel_time_t now;
+ bool drop;
+
+ stats->states++;
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, backlog,
+ vars, params, stats,
+ now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ stats->state2++;
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ vars->count++;
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ goto end;
+ }
+ qdisc_drop(skb, stats->sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, stats->sch);
+ if (!codel_should_drop(skb, backlog,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ }
+ }
+ }
+ } else if (drop) {
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ qdisc_drop(skb, stats->sch);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, stats->sch);
+ drop = codel_should_drop(skb, backlog, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+ stats->state3++;
+ /*
+ * if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ if (codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = vars->count - vars->lastcount + 1;
+ } else {
+ vars->count = 1;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->count);
+ }
+end:
+ return skb;
+}
+#endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f8..fadd252 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,17 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2..30fab03 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 0000000..3112afa
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,272 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ * Implemented on linux by :
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(&q->params, &q->vars, &q->stats,
+ dequeue, &sch->qstats.backlog);
+ /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ /* It should be possible to run with no limit,
+ * with infinite memory :)
+ */
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params, sch);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats, sch);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_ECN,
+ q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ const struct codel_sched_data *q = qdisc_priv(sch);
+ struct tc_codel_xstats st = {
+ .maxpacket = q->params.maxpacket,
+ .count = q->vars.count,
+ .lastcount = q->vars.lastcount,
+ .states = q->stats.states,
+ .state1 = q->stats.state1,
+ .state2 = q->stats.state2,
+ .state3 = q->stats.state3,
+ .drop_overlimit = q->drop_overlimit,
+ .ldelay = codel_time_to_us(q->vars.ldelay),
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ if (q->vars.dropping && q->vars.drop_next)
+ st.drop_next = codel_time_to_us(q->vars.drop_next -
+ codel_get_time());
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Codel] [PATCH v12 iproute2] codel: Controlled Delay AQM
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
@ 2012-05-09 13:54 ` Eric Dumazet
2012-05-09 15:47 ` [Codel] [PATCH v12] " Dave Taht
` (2 subsequent siblings)
3 siblings, 0 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-09 13:54 UTC (permalink / raw)
To: Dave Täht; +Cc: codel, bloat
From: Eric Dumazet <edumazet@google.com>
An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
http://queue.acm.org/detail.cfm?id=2209336
Based on initial work from Dave Taht.
tc qdisc ... codel [ limit PACKETS ] [ target TIME ]
[ interval TIME ] [ ecn ]
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
---
This is the patch against iproute2
include/linux/pkt_sched.h | 28 +++++
tc/Makefile | 1
tc/q_codel.c | 189 ++++++++++++++++++++++++++++++++++++
3 files changed, 218 insertions(+)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 410b33d..4d542ed 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -654,4 +654,32 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count;
+ __u32 lastcount;
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __u32 drop_next;
+ __u32 drop_overlimit;
+ __u32 ecn_mark;
+ __u32 dropping;
+ __u32 state1;
+ __u32 state2;
+ __u32 state3;
+ __u32 states;
+};
+
#endif
diff --git a/tc/Makefile b/tc/Makefile
index be8cd5a..8a7cc8d 100644
--- a/tc/Makefile
+++ b/tc/Makefile
@@ -47,6 +47,7 @@ TCMODULES += em_cmp.o
TCMODULES += em_u32.o
TCMODULES += em_meta.o
TCMODULES += q_mqprio.o
+TCMODULES += q_codel.o
TCSO :=
ifeq ($(TC_CONFIG_ATM),y)
diff --git a/tc/q_codel.c b/tc/q_codel.c
new file mode 100644
index 0000000..485fe47
--- /dev/null
+++ b/tc/q_codel.c
@@ -0,0 +1,189 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+
+#include "utils.h"
+#include "tc_util.h"
+
+static void explain(void)
+{
+ fprintf(stderr, "Usage: ... codel [ limit PACKETS ] [ target TIME]\n");
+ fprintf(stderr, " [ interval TIME ] [ ecn ]\n");
+}
+
+static int codel_parse_opt(struct qdisc_util *qu, int argc, char **argv,
+ struct nlmsghdr *n)
+{
+ unsigned limit = 0;
+ unsigned target = 0;
+ unsigned interval = 0;
+ int ecn = -1;
+ struct rtattr *tail;
+
+ while (argc > 0) {
+ if (strcmp(*argv, "limit") == 0) {
+ NEXT_ARG();
+ if (get_unsigned(&limit, *argv, 0)) {
+ fprintf(stderr, "Illegal \"limit\"\n");
+ return -1;
+ }
+ } else if (strcmp(*argv, "target") == 0) {
+ NEXT_ARG();
+ if (get_time(&target, *argv)) {
+ fprintf(stderr, "Illegal \"target\"\n");
+ return -1;
+ }
+ } else if (strcmp(*argv, "interval") == 0) {
+ NEXT_ARG();
+ if (get_time(&interval, *argv)) {
+ fprintf(stderr, "Illegal \"interval\"\n");
+ return -1;
+ }
+ } else if (strcmp(*argv, "ecn") == 0) {
+ ecn = 1;
+ } else if (strcmp(*argv, "noecn") == 0) {
+ ecn = 0;
+ } else if (strcmp(*argv, "help") == 0) {
+ explain();
+ return -1;
+ } else {
+ fprintf(stderr, "What is \"%s\"?\n", *argv);
+ explain();
+ return -1;
+ }
+ argc--; argv++;
+ }
+
+ tail = NLMSG_TAIL(n);
+ addattr_l(n, 1024, TCA_OPTIONS, NULL, 0);
+ if (limit)
+ addattr_l(n, 1024, TCA_CODEL_LIMIT, &limit, sizeof(limit));
+ if (interval)
+ addattr_l(n, 1024, TCA_CODEL_INTERVAL, &interval, sizeof(interval));
+ if (target)
+ addattr_l(n, 1024, TCA_CODEL_TARGET, &target, sizeof(target));
+ if (ecn != -1)
+ addattr_l(n, 1024, TCA_CODEL_ECN, &ecn, sizeof(ecn));
+ tail->rta_len = (void *) NLMSG_TAIL(n) - (void *) tail;
+ return 0;
+}
+
+static int codel_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
+{
+ struct rtattr *tb[TCA_CODEL_MAX + 1];
+ unsigned limit;
+ unsigned interval;
+ unsigned target;
+ unsigned ecn;
+ SPRINT_BUF(b1);
+
+ if (opt == NULL)
+ return 0;
+
+ parse_rtattr_nested(tb, TCA_CODEL_MAX, opt);
+
+ if (tb[TCA_CODEL_ECN] &&
+ RTA_PAYLOAD(tb[TCA_CODEL_ECN]) >= sizeof(__u32)) {
+ ecn = rta_getattr_u32(tb[TCA_CODEL_ECN]);
+ if (ecn)
+ fprintf(f, "ecn ");
+ }
+ if (tb[TCA_CODEL_LIMIT] &&
+ RTA_PAYLOAD(tb[TCA_CODEL_LIMIT]) >= sizeof(__u32)) {
+ limit = rta_getattr_u32(tb[TCA_CODEL_LIMIT]);
+ fprintf(f, "limit %up ", limit);
+ }
+ if (tb[TCA_CODEL_TARGET] &&
+ RTA_PAYLOAD(tb[TCA_CODEL_TARGET]) >= sizeof(__u32)) {
+ target = rta_getattr_u32(tb[TCA_CODEL_TARGET]);
+ fprintf(f, "target %s ", sprint_time(target, b1));
+ }
+ if (tb[TCA_CODEL_INTERVAL] &&
+ RTA_PAYLOAD(tb[TCA_CODEL_INTERVAL]) >= sizeof(__u32)) {
+ interval = rta_getattr_u32(tb[TCA_CODEL_INTERVAL]);
+ fprintf(f, "interval %s ", sprint_time(interval, b1));
+ }
+
+ return 0;
+}
+
+static int codel_print_xstats(struct qdisc_util *qu, FILE *f,
+ struct rtattr *xstats)
+{
+ struct tc_codel_xstats *st;
+ SPRINT_BUF(b1);
+
+ if (xstats == NULL)
+ return 0;
+
+ if (RTA_PAYLOAD(xstats) < sizeof(*st))
+ return -1;
+
+ st = RTA_DATA(xstats);
+ fprintf(f, " maxpacket %u count %u lastcount %u ldelay %s",
+ st->maxpacket, st->count, st->lastcount,
+ sprint_time(st->ldelay, b1));
+ if (st->dropping)
+ fprintf(f, " dropping");
+ if (st->drop_next)
+ fprintf(f, " drop_next %s", sprint_time(st->drop_next, b1));
+ fprintf(f, "\n ecn_mark %u", st->ecn_mark);
+ fprintf(f, " drop_overlimit %u", st->drop_overlimit);
+ fprintf(f, " states %u : %u %u %u",
+ st->states, st->state1, st->state2, st->state3);
+ return 0;
+
+}
+
+struct qdisc_util codel_qdisc_util = {
+ .id = "codel",
+ .parse_qopt = codel_parse_opt,
+ .print_qopt = codel_print_opt,
+ .print_xstats = codel_print_xstats,
+};
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
2012-05-09 13:54 ` [Codel] [PATCH v12 iproute2] " Eric Dumazet
@ 2012-05-09 15:47 ` Dave Taht
2012-05-09 15:54 ` Eric Dumazet
2012-05-09 16:32 ` [Codel] [Bloat] " Josh Hunt
2012-05-10 17:51 ` [Codel] [PATCH v13 net-next] " Eric Dumazet
3 siblings, 1 reply; 19+ messages in thread
From: Dave Taht @ 2012-05-09 15:47 UTC (permalink / raw)
To: Eric Dumazet; +Cc: codel, bloat, Dave Täht
Doing (or leveraging) the timestamp on actual ingress to the system,
rather than entrance to the exiting queue, more accurately will
compensate for overall system loading and the overhead of traversing
3+ pages of function calls to get it from the
entrance point to the exit, and should result in 'tighter' results.
--
Dave Täht
SKYPE: davetaht
US Tel: 1-239-829-5608
http://www.bufferbloat.net
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 15:47 ` [Codel] [PATCH v12] " Dave Taht
@ 2012-05-09 15:54 ` Eric Dumazet
2012-05-09 16:08 ` Dave Taht
0 siblings, 1 reply; 19+ messages in thread
From: Eric Dumazet @ 2012-05-09 15:54 UTC (permalink / raw)
To: Dave Taht; +Cc: codel, bloat, Dave Täht
On Wed, 2012-05-09 at 08:47 -0700, Dave Taht wrote:
> Doing (or leveraging) the timestamp on actual ingress to the system,
> rather than entrance to the exiting queue, more accurately will
> compensate for overall system loading and the overhead of traversing
> 3+ pages of function calls to get it from the
> entrance point to the exit, and should result in 'tighter' results.
Most qdiscs dont need this timestamp, so it would add overhead in fast
path.
Also, the delta between 'entering' qdisc layer and entering codel
enqueue is almost a constant for a given qdisc/filter setup, and less
than one us :
If you really want to take it into account, replace 'target 5000us' by
'target 5001 us'
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 15:54 ` Eric Dumazet
@ 2012-05-09 16:08 ` Dave Taht
2012-05-09 16:14 ` Eric Dumazet
0 siblings, 1 reply; 19+ messages in thread
From: Dave Taht @ 2012-05-09 16:08 UTC (permalink / raw)
To: Eric Dumazet; +Cc: codel, bloat, Dave Täht
On Wed, May 9, 2012 at 8:54 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Wed, 2012-05-09 at 08:47 -0700, Dave Taht wrote:
>> Doing (or leveraging) the timestamp on actual ingress to the system,
>> rather than entrance to the exiting queue, more accurately will
>> compensate for overall system loading and the overhead of traversing
>> 3+ pages of function calls to get it from the
>> entrance point to the exit, and should result in 'tighter' results.
>
>
> Most qdiscs dont need this timestamp, so it would add overhead in fast
> path.
>
> Also, the delta between 'entering' qdisc layer and entering codel
> enqueue is almost a constant for a given qdisc/filter setup, and less
> than one us :
>
> If you really want to take it into account, replace 'target 5000us' by
> 'target 5001 us'
In the special case of routers and switches with codel on by default,
I think it would help. I can code it up....
>
>
--
Dave Täht
SKYPE: davetaht
US Tel: 1-239-829-5608
http://www.bufferbloat.net
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 16:08 ` Dave Taht
@ 2012-05-09 16:14 ` Eric Dumazet
0 siblings, 0 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-09 16:14 UTC (permalink / raw)
To: Dave Taht; +Cc: codel, bloat, Dave Täht
On Wed, 2012-05-09 at 09:08 -0700, Dave Taht wrote:
> In the special case of routers and switches with codel on by default,
> I think it would help. I can code it up....
Can you explain why it would help ?
How less than 0.1 % offset in time measurement can change things ?
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [Bloat] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
2012-05-09 13:54 ` [Codel] [PATCH v12 iproute2] " Eric Dumazet
2012-05-09 15:47 ` [Codel] [PATCH v12] " Dave Taht
@ 2012-05-09 16:32 ` Josh Hunt
2012-05-09 16:37 ` Eric Dumazet
2012-05-10 17:51 ` [Codel] [PATCH v13 net-next] " Eric Dumazet
3 siblings, 1 reply; 19+ messages in thread
From: Josh Hunt @ 2012-05-09 16:32 UTC (permalink / raw)
To: Eric Dumazet; +Cc: codel, bloat, Dave Täht
On Wed, May 9, 2012 at 8:50 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> From: Eric Dumazet <edumazet@google.com>
>
> An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
>
> http://queue.acm.org/detail.cfm?id=2209336
>
> Based on initial work from Dave Taht.
>
> Refactored to help future codel inclusion as plugin for other linux
> qdisc (SFQ), like done with RED plugin.
>
> Tested up to 10Gb speeds with no particular problems.
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
> Cc: Kathleen Nichols <nichols@pollere.com>
> Cc: Van Jacobson <van@pollere.net>
> ---
> v12: algo changes after Kathleen & Van last updates
> - introduction of lastcount.
> - minbytes renamed to maxpacket.
> - maxpacket automatically learns biggest packet size.
> - ldelay record sojourn time of last dequeued packet.
> - various changes, better comments...
>
> I hope this is the last version before upstream submission (netdev)
> I'll send the iproute2 patch as well.
>
> include/linux/pkt_sched.h | 28 +++
> include/net/codel.h | 325 ++++++++++++++++++++++++++++++++++++
> net/sched/Kconfig | 11 +
> net/sched/Makefile | 1
> net/sched/sch_codel.c | 272 ++++++++++++++++++++++++++++++
> 5 files changed, 637 insertions(+)
>
> diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
> index ffe975c..453dd2c 100644
> --- a/include/linux/pkt_sched.h
> +++ b/include/linux/pkt_sched.h
> @@ -655,4 +655,32 @@ struct tc_qfq_stats {
> __u32 lmax;
> };
>
> +/* CODEL */
> +
> +enum {
> + TCA_CODEL_UNSPEC,
> + TCA_CODEL_TARGET,
> + TCA_CODEL_LIMIT,
> + TCA_CODEL_INTERVAL,
> + TCA_CODEL_ECN,
> + __TCA_CODEL_MAX
> +};
> +
> +#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
> +
> +struct tc_codel_xstats {
> + __u32 maxpacket; /* largest packet we've seen so far */
> + __u32 count;
> + __u32 lastcount;
> + __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
> + __u32 drop_next;
> + __u32 drop_overlimit;
> + __u32 ecn_mark;
> + __u32 dropping;
> + __u32 state1;
> + __u32 state2;
> + __u32 state3;
> + __u32 states;
> +};
> +
> #endif
> diff --git a/include/net/codel.h b/include/net/codel.h
> new file mode 100644
> index 0000000..565c1fe
> --- /dev/null
> +++ b/include/net/codel.h
> @@ -0,0 +1,325 @@
> +#ifndef __NET_SCHED_CODEL_H
> +#define __NET_SCHED_CODEL_H
> +
> +/*
> + * Codel - The Controlled-Delay Active Queue Management algorithm
> + *
> + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
> + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + * notice, this list of conditions, and the following disclaimer,
> + * without modification.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + * 3. The names of the authors may not be used to endorse or promote products
> + * derived from this software without specific prior written permission.
> + *
> + * Alternatively, provided that this notice is retained in full, this
> + * software may be distributed under the terms of the GNU General
> + * Public License ("GPL") version 2, in which case the provisions of the
> + * GPL apply INSTEAD OF those given above.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> + * DAMAGE.
> + *
> + */
> +
> +#include <linux/types.h>
> +#include <linux/bug.h>
> +#include <linux/ktime.h>
> +#include <net/inet_ecn.h>
> +
> +/* Controlling Queue Delay (CoDel) algorithm
> + * =========================================
> + * Source : Kathleen Nichols and Van Jacobson
> + * http://queue.acm.org/detail.cfm?id=2209396
This link is dead. Looks like it's a typo and should be:
http://queue.acm.org/detail.cfm?id=2209336
Josh
> + *
> + * Implemented on linux by Dave Taht and Eric Dumazet
> + */
> +
> +
> +/*
> + * CoDel uses a 1024 nsec clock, encoded in u32
> + * This gives a range of 2199 seconds, because of signed compares
> + */
> +typedef u32 codel_time_t;
> +#define CODEL_SHIFT 10
> +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
> +
> +static inline codel_time_t codel_get_time(void)
> +{
> + u64 ns = ktime_to_ns(ktime_get());
> +
> + return ns >> CODEL_SHIFT;
> +}
> +
> +#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
> +#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
> +#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
> +#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
> +
> +struct codel_skb_cb {
> + codel_time_t enqueue_time;
> +};
> +
> +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
> +{
> + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
> + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
> +}
> +
> +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
> +{
> + return get_codel_cb(skb)->enqueue_time;
> +}
> +
> +static void codel_set_enqueue_time(struct sk_buff *skb)
> +{
> + get_codel_cb(skb)->enqueue_time = codel_get_time();
> +}
> +
> +static inline u32 codel_time_to_us(codel_time_t val)
> +{
> + u64 valns = ((u64)val << CODEL_SHIFT);
> +
> + do_div(valns, NSEC_PER_USEC);
> + return (u32)valns;
> +}
> +
> +struct codel_params {
> + u32 maxpacket; /* largest packet we've seen so far */
> + codel_time_t target; /* target queue size (in time units) */
> + codel_time_t interval; /* width of moving time window */
> + bool ecn; /* is ECN enabled */
> +};
> +
> +struct codel_vars {
> + u32 count; /* how many drops we've done since the last time
> + * we entered dropping state
> + */
> + u32 lastcount; /* count at entry to dropping state */
> + bool dropping; /* set to true if in dropping state */
> +
> + codel_time_t first_above_time; /* when we went (or will go) continuously
> + * above target for interval
> + */
> + codel_time_t drop_next; /* time to drop next packet, or when we dropped last */
> + codel_time_t ldelay; /* sojourn time of last dequeued packet */
> +};
> +
> +/* contains stats and some shared info */
> +struct codel_stats {
> + struct Qdisc *sch;
> + u32 drop_count; /* temp count of dropped packets in dequeue() */
> +
> + u32 ecn_mark; /* number of packets we ECN marked instead of dropping */
> + u32 states; /* number of codel_dequeue() calls */
> + u32 state1; /* number of times ok_to_drop was set to true */
> + u32 state2;
> + u32 state3;
> +};
> +
> +static void codel_params_init(struct codel_params *params,
> + const struct Qdisc *sch)
> +{
> + params->maxpacket = 256;
> + params->interval = MS2TIME(100);
> + params->target = MS2TIME(5);
> + params->ecn = false;
> +}
> +
> +static void codel_vars_init(struct codel_vars *vars)
> +{
> + vars->drop_next = 0;
> + vars->first_above_time = 0;
> + vars->dropping = false; /* exit dropping state */
> + vars->count = 0;
> + vars->lastcount = 0;
> +}
> +
> +static void codel_stats_init(struct codel_stats *stats,
> + struct Qdisc *sch)
> +{
> + stats->sch = sch; /* back pointer for qdisc_drop() calls */
> +}
> +
> +/* return interval/sqrt(x) with good precision */
> +static u32 codel_inv_sqrt(u32 _interval, u32 _x)
> +{
> + u64 interval = _interval;
> + unsigned long x = _x;
> +
> + /* Scale operands for max precision */
> +
> +#if BITS_PER_LONG == 64
> + x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
> + interval <<= 16;
> +#endif
> +
> + while (x < (1UL << (BITS_PER_LONG - 2))) {
> + x <<= 2;
> + interval <<= 1;
> + }
> + do_div(interval, int_sqrt(x));
> + return (u32)interval;
> +}
> +
> +static codel_time_t codel_control_law(codel_time_t t,
> + codel_time_t interval,
> + u32 count)
> +{
> + return t + codel_inv_sqrt(interval, count);
> +}
> +
> +
> +static bool codel_should_drop(struct sk_buff *skb,
> + unsigned int *backlog,
> + struct codel_vars *vars,
> + struct codel_params *params,
> + struct codel_stats *stats,
> + codel_time_t now)
> +{
> + bool ok_to_drop;
> +
> + if (!skb) {
> + vars->first_above_time = 0;
> + return false;
> + }
> +
> + vars->ldelay = now - codel_get_enqueue_time(skb);
> + *backlog -= qdisc_pkt_len(skb);
> +
> + if (unlikely(qdisc_pkt_len(skb) > params->maxpacket))
> + params->maxpacket = qdisc_pkt_len(skb);
> +
> + if (codel_time_before(vars->ldelay, params->target) ||
> + *backlog <= params->maxpacket) {
> + /* went below - stay below for at least interval */
> + vars->first_above_time = 0;
> + return false;
> + }
> + ok_to_drop = false;
> + if (vars->first_above_time == 0) {
> + /* just went above from below. If we stay above
> + * for at least interval we'll say it's ok to drop
> + */
> + vars->first_above_time = now + params->interval;
> + } else if (codel_time_after(now, vars->first_above_time)) {
> + ok_to_drop = true;
> + stats->state1++;
> + }
> + return ok_to_drop;
> +}
> +
> +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
> + struct Qdisc *sch);
> +
> +static struct sk_buff *codel_dequeue(struct codel_params *params,
> + struct codel_vars *vars,
> + struct codel_stats *stats,
> + codel_skb_dequeue_t dequeue_func,
> + u32 *backlog)
> +{
> + struct sk_buff *skb = dequeue_func(vars, stats->sch);
> + codel_time_t now;
> + bool drop;
> +
> + stats->states++;
> + if (!skb) {
> + vars->dropping = false;
> + return skb;
> + }
> + now = codel_get_time();
> + drop = codel_should_drop(skb, backlog,
> + vars, params, stats,
> + now);
> + if (vars->dropping) {
> + if (!drop) {
> + /* sojourn time below target - leave dropping state */
> + vars->dropping = false;
> + } else if (codel_time_after_eq(now, vars->drop_next)) {
> + stats->state2++;
> + /* It's time for the next drop. Drop the current
> + * packet and dequeue the next. The dequeue might
> + * take us out of dropping state.
> + * If not, schedule the next drop.
> + * A large backlog might result in drop rates so high
> + * that the next drop should happen now,
> + * hence the while loop.
> + */
> + while (vars->dropping &&
> + codel_time_after_eq(now, vars->drop_next)) {
> + vars->count++;
> + if (params->ecn && INET_ECN_set_ce(skb)) {
> + stats->ecn_mark++;
> + vars->drop_next =
> + codel_control_law(vars->drop_next,
> + params->interval,
> + vars->count);
> + goto end;
> + }
> + qdisc_drop(skb, stats->sch);
> + stats->drop_count++;
> + skb = dequeue_func(vars, stats->sch);
> + if (!codel_should_drop(skb, backlog,
> + vars, params, stats, now)) {
> + /* leave dropping state */
> + vars->dropping = false;
> + } else {
> + /* and schedule the next drop */
> + vars->drop_next =
> + codel_control_law(vars->drop_next,
> + params->interval,
> + vars->count);
> + }
> + }
> + }
> + } else if (drop) {
> + if (params->ecn && INET_ECN_set_ce(skb)) {
> + stats->ecn_mark++;
> + } else {
> + qdisc_drop(skb, stats->sch);
> + stats->drop_count++;
> +
> + skb = dequeue_func(vars, stats->sch);
> + drop = codel_should_drop(skb, backlog, vars, params,
> + stats, now);
> + }
> + vars->dropping = true;
> + stats->state3++;
> + /*
> + * if min went above target close to when we last went below it
> + * assume that the drop rate that controlled the queue on the
> + * last cycle is a good starting point to control it now.
> + */
> + if (codel_time_before(now - vars->drop_next,
> + 16 * params->interval)) {
> + vars->count = vars->count - vars->lastcount + 1;
> + } else {
> + vars->count = 1;
> + }
> + vars->lastcount = vars->count;
> + vars->drop_next = codel_control_law(now, params->interval,
> + vars->count);
> + }
> +end:
> + return skb;
> +}
> +#endif
> diff --git a/net/sched/Kconfig b/net/sched/Kconfig
> index 75b58f8..fadd252 100644
> --- a/net/sched/Kconfig
> +++ b/net/sched/Kconfig
> @@ -250,6 +250,17 @@ config NET_SCH_QFQ
>
> If unsure, say N.
>
> +config NET_SCH_CODEL
> + tristate "Controlled Delay AQM (CODEL)"
> + help
> + Say Y here if you want to use the Controlled Delay (CODEL)
> + packet scheduling algorithm.
> +
> + To compile this driver as a module, choose M here: the module
> + will be called sch_codel.
> +
> + If unsure, say N.
> +
> config NET_SCH_INGRESS
> tristate "Ingress Qdisc"
> depends on NET_CLS_ACT
> diff --git a/net/sched/Makefile b/net/sched/Makefile
> index 8cdf4e2..30fab03 100644
> --- a/net/sched/Makefile
> +++ b/net/sched/Makefile
> @@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
> obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
> obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
> obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
> +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
>
> obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
> obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
> diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
> new file mode 100644
> index 0000000..3112afa
> --- /dev/null
> +++ b/net/sched/sch_codel.c
> @@ -0,0 +1,272 @@
> +/*
> + * Codel - The Controlled-Delay Active Queue Management algorithm
> + *
> + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
> + *
> + * Implemented on linux by :
> + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + * 1. Redistributions of source code must retain the above copyright
> + * notice, this list of conditions, and the following disclaimer,
> + * without modification.
> + * 2. Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + * 3. The names of the authors may not be used to endorse or promote products
> + * derived from this software without specific prior written permission.
> + *
> + * Alternatively, provided that this notice is retained in full, this
> + * software may be distributed under the terms of the GNU General
> + * Public License ("GPL") version 2, in which case the provisions of the
> + * GPL apply INSTEAD OF those given above.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> + * DAMAGE.
> + *
> + */
> +
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/errno.h>
> +#include <linux/skbuff.h>
> +#include <net/pkt_sched.h>
> +#include <net/codel.h>
> +
> +
> +#define DEFAULT_CODEL_LIMIT 1000
> +
> +struct codel_sched_data {
> + struct codel_params params;
> + struct codel_vars vars;
> + struct codel_stats stats;
> + u32 drop_overlimit;
> +};
> +
> +/* This is the specific function called from codel_dequeue()
> + * to dequeue a packet from queue. Note: backlog is handled in
> + * codel, we dont need to reduce it here.
> + */
> +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
> +{
> + struct sk_buff *skb = __skb_dequeue(&sch->q);
> +
> + prefetch(&skb->end); /* we'll need skb_shinfo() */
> + return skb;
> +}
> +
> +static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct sk_buff *skb;
> +
> + skb = codel_dequeue(&q->params, &q->vars, &q->stats,
> + dequeue, &sch->qstats.backlog);
> + /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
> + * or HTB crashes. Defer it for next round.
> + */
> + if (q->stats.drop_count && sch->q.qlen) {
> + qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
> + q->stats.drop_count = 0;
> + }
> + if (skb)
> + qdisc_bstats_update(sch, skb);
> + return skb;
> +}
> +
> +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
> +{
> + struct codel_sched_data *q;
> +
> + if (likely(qdisc_qlen(sch) < sch->limit)) {
> + codel_set_enqueue_time(skb);
> + return qdisc_enqueue_tail(skb, sch);
> + }
> + q = qdisc_priv(sch);
> + q->drop_overlimit++;
> + return qdisc_drop(skb, sch);
> +}
> +
> +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
> + [TCA_CODEL_TARGET] = { .type = NLA_U32 },
> + [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
> + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
> + [TCA_CODEL_ECN] = { .type = NLA_U32 },
> +};
> +
> +static int codel_change(struct Qdisc *sch, struct nlattr *opt)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct nlattr *tb[TCA_CODEL_MAX + 1];
> + unsigned int qlen;
> + int err;
> +
> + if (!opt)
> + return -EINVAL;
> +
> + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
> + if (err < 0)
> + return err;
> +
> + sch_tree_lock(sch);
> + if (tb[TCA_CODEL_TARGET]) {
> + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
> +
> + q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
> + }
> + if (tb[TCA_CODEL_INTERVAL]) {
> + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
> +
> + q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
> + }
> + if (tb[TCA_CODEL_LIMIT])
> + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
> +
> + if (tb[TCA_CODEL_ECN])
> + q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
> +
> + qlen = sch->q.qlen;
> + while (sch->q.qlen > sch->limit) {
> + struct sk_buff *skb = __skb_dequeue(&sch->q);
> +
> + sch->qstats.backlog -= qdisc_pkt_len(skb);
> + qdisc_drop(skb, sch);
> + }
> + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
> +
> + sch_tree_unlock(sch);
> + return 0;
> +}
> +
> +static int codel_init(struct Qdisc *sch, struct nlattr *opt)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> +
> + /* It should be possible to run with no limit,
> + * with infinite memory :)
> + */
> + sch->limit = DEFAULT_CODEL_LIMIT;
> +
> + codel_params_init(&q->params, sch);
> + codel_vars_init(&q->vars);
> + codel_stats_init(&q->stats, sch);
> +
> + if (opt) {
> + int err = codel_change(sch, opt);
> +
> + if (err)
> + return err;
> + }
> +
> + if (sch->limit >= 1)
> + sch->flags |= TCQ_F_CAN_BYPASS;
> + else
> + sch->flags &= ~TCQ_F_CAN_BYPASS;
> +
> + return 0;
> +}
> +
> +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> + struct nlattr *opts;
> +
> + opts = nla_nest_start(skb, TCA_OPTIONS);
> + if (opts == NULL)
> + goto nla_put_failure;
> +
> + if (nla_put_u32(skb, TCA_CODEL_TARGET,
> + codel_time_to_us(q->params.target)) ||
> + nla_put_u32(skb, TCA_CODEL_LIMIT,
> + sch->limit) ||
> + nla_put_u32(skb, TCA_CODEL_INTERVAL,
> + codel_time_to_us(q->params.interval)) ||
> + nla_put_u32(skb, TCA_CODEL_ECN,
> + q->params.ecn))
> + goto nla_put_failure;
> +
> + return nla_nest_end(skb, opts);
> +
> +nla_put_failure:
> + nla_nest_cancel(skb, opts);
> + return -1;
> +}
> +
> +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
> +{
> + const struct codel_sched_data *q = qdisc_priv(sch);
> + struct tc_codel_xstats st = {
> + .maxpacket = q->params.maxpacket,
> + .count = q->vars.count,
> + .lastcount = q->vars.lastcount,
> + .states = q->stats.states,
> + .state1 = q->stats.state1,
> + .state2 = q->stats.state2,
> + .state3 = q->stats.state3,
> + .drop_overlimit = q->drop_overlimit,
> + .ldelay = codel_time_to_us(q->vars.ldelay),
> + .dropping = q->vars.dropping,
> + .ecn_mark = q->stats.ecn_mark,
> + };
> +
> + if (q->vars.dropping && q->vars.drop_next)
> + st.drop_next = codel_time_to_us(q->vars.drop_next -
> + codel_get_time());
> +
> + return gnet_stats_copy_app(d, &st, sizeof(st));
> +}
> +
> +static void codel_reset(struct Qdisc *sch)
> +{
> + struct codel_sched_data *q = qdisc_priv(sch);
> +
> + qdisc_reset_queue(sch);
> + codel_vars_init(&q->vars);
> +}
> +
> +static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
> + .id = "codel",
> + .priv_size = sizeof(struct codel_sched_data),
> +
> + .enqueue = codel_qdisc_enqueue,
> + .dequeue = codel_qdisc_dequeue,
> + .peek = qdisc_peek_dequeued,
> + .init = codel_init,
> + .reset = codel_reset,
> + .change = codel_change,
> + .dump = codel_dump,
> + .dump_stats = codel_dump_stats,
> + .owner = THIS_MODULE,
> +};
> +
> +static int __init codel_module_init(void)
> +{
> + return register_qdisc(&codel_qdisc_ops);
> +}
> +static void __exit codel_module_exit(void)
> +{
> + unregister_qdisc(&codel_qdisc_ops);
> +}
> +module_init(codel_module_init)
> +module_exit(codel_module_exit)
> +
> +MODULE_DESCRIPTION("Controlled Delay queue discipline");
> +MODULE_AUTHOR("Dave Taht");
> +MODULE_AUTHOR("Eric Dumazet");
> +MODULE_LICENSE("Dual BSD/GPL");
>
>
> _______________________________________________
> Bloat mailing list
> Bloat@lists.bufferbloat.net
> https://lists.bufferbloat.net/listinfo/bloat
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [Bloat] [PATCH v12] codel: Controlled Delay AQM
2012-05-09 16:32 ` [Codel] [Bloat] " Josh Hunt
@ 2012-05-09 16:37 ` Eric Dumazet
0 siblings, 0 replies; 19+ messages in thread
From: Eric Dumazet @ 2012-05-09 16:37 UTC (permalink / raw)
To: Josh Hunt; +Cc: codel, bloat, Dave Täht
On Wed, 2012-05-09 at 11:32 -0500, Josh Hunt wrote:
> On Wed, May 9, 2012 at 8:50 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> > + * Source : Kathleen Nichols and Van Jacobson
> > + * http://queue.acm.org/detail.cfm?id=2209396
>
> This link is dead. Looks like it's a typo and should be:
> http://queue.acm.org/detail.cfm?id=2209336
Thanks, fixed ;)
^ permalink raw reply [flat|nested] 19+ messages in thread
* [Codel] [PATCH v13 net-next] codel: Controlled Delay AQM
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
` (2 preceding siblings ...)
2012-05-09 16:32 ` [Codel] [Bloat] " Josh Hunt
@ 2012-05-10 17:51 ` Eric Dumazet
2012-05-11 3:36 ` David Miller
3 siblings, 1 reply; 19+ messages in thread
From: Eric Dumazet @ 2012-05-10 17:51 UTC (permalink / raw)
To: Dave Täht, David Miller
Cc: netdev, Matt Mathis, codel, Yuchung Cheng, bloat, Stephen Hemminger
From: Eric Dumazet <edumazet@google.com>
An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
http://queue.acm.org/detail.cfm?id=2209336
This AQM's main input is no longer queue size in bytes or packets, but the
delay packets stay in (FIFO) queue.
As we don't have infinite memory, we still can drop packets in enqueue()
in case of massive load, but mean of CoDel is to drop packets in
dequeue(), using a control law based on two simple parameters :
target : target sojourn time (default 5ms)
interval : width of moving time window (default 100ms)
Based on initial work from Dave Taht.
Refactored to help future codel inclusion as a plugin for other linux
qdisc (FQ_CODEL, ...), like RED.
include/net/codel.h contains the codel algorithm, kept as close as possible
to the Kathleen/Van reference implementation.
net/sched/sch_codel.c contains the linux qdisc specific glue.
Separate structures permit a memory efficient implementation of fq_codel
(to be sent as a separate work) : Each flow has its own struct
codel_vars.
timestamps are taken at enqueue() time with 1024 ns precision, allowing
a range of 2199 seconds in queue, and 100Gb links support. iproute2 uses
usec as base unit.
Selected packets are dropped, unless ECN is enabled and packets can get
ECN mark instead.
Tested from 2Mb to 10Gb speeds with no particular problems, on ixgbe and
tg3 drivers (BQL enabled).
Usage: tc qdisc ... codel [ limit PACKETS ] [ target TIME ]
[ interval TIME ] [ ecn ]
qdisc codel 10: parent 1:1 limit 2000p target 3.0ms interval 60.0ms ecn
Sent 13347099587 bytes 8815805 pkt (dropped 0, overlimits 0 requeues 0)
rate 202365Kbit 16708pps backlog 113550b 75p requeues 0
count 116 lastcount 98 ldelay 4.3ms dropping drop_next 816us
maxpacket 1514 ecn_mark 84399 drop_overlimit 0
CoDel must be seen as a base module, and should be used keeping in mind
there is still a FIFO queue. So a typical setup will probably need a
hierarchy of several qdiscs and packet classifiers to be able to meet
whatever constraints a user might have.
One possible example would be to use fq_codel, which combines Fair
Queueing and CoDel, in replacement of sfq / sfq_red.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Cc: Tom Herbert <therbert@google.com>
Cc: Matt Mathis <mattmathis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
---
include/linux/pkt_sched.h | 26 ++
include/net/codel.h | 334 ++++++++++++++++++++++++++++++++++++
net/sched/Kconfig | 11 +
net/sched/Makefile | 1
net/sched/sch_codel.c | 275 +++++++++++++++++++++++++++++
5 files changed, 647 insertions(+)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index ffe975c..cde56c2 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -655,4 +655,30 @@ struct tc_qfq_stats {
__u32 lmax;
};
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count; /* how many drops we've done since the last time we
+ * entered dropping state
+ */
+ __u32 lastcount; /* count at entry to dropping state */
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __s32 drop_next; /* time to drop next packet */
+ __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
+ __u32 dropping; /* are we in dropping state ? */
+};
+
#endif
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 0000000..ecafb0b
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,334 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+
+/*
+ * CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+ u64 ns = ktime_to_ns(ktime_get());
+
+ return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0)
+#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0)
+#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0)
+#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0)
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+ u64 valns = ((u64)val << CODEL_SHIFT);
+
+ do_div(valns, NSEC_PER_USEC);
+ return (u32)valns;
+}
+
+/**
+ * struct codel_params - contains codel parameters
+ * @target: target queue size (in time units)
+ * @interval: width of moving time window
+ * @ecn: is Explicit Congestion Notification enabled
+ */
+struct codel_params {
+ codel_time_t target;
+ codel_time_t interval;
+ bool ecn;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count: how many drops we've done since the last time we
+ * entered dropping state
+ * @lastcount: count at entry to dropping state
+ * @dropping: set to true if in dropping state
+ * @first_above_time: when we went (or will go) continuously above target
+ * for interval
+ * @drop_next: time to drop next packet, or when we dropped last
+ * @ldelay: sojourn time of last dequeued packet
+ */
+struct codel_vars {
+ u32 count;
+ u32 lastcount;
+ bool dropping;
+ codel_time_t first_above_time;
+ codel_time_t drop_next;
+ codel_time_t ldelay;
+};
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+ * @drop_count: temp count of dropped packets in dequeue()
+ * @ecn_mark: number of packets we ECN marked instead of dropping
+ */
+struct codel_stats {
+ u32 maxpacket;
+ u32 drop_count;
+ u32 ecn_mark;
+};
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ vars->drop_next = 0;
+ vars->first_above_time = 0;
+ vars->dropping = false; /* exit dropping state */
+ vars->count = 0;
+ vars->lastcount = 0;
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 256;
+}
+
+/* return interval/sqrt(x) with good precision
+ * relies on int_sqrt(unsigned long x) kernel implementation
+ */
+static u32 codel_inv_sqrt(u32 _interval, u32 _x)
+{
+ u64 interval = _interval;
+ unsigned long x = _x;
+
+ /* Scale operands for max precision */
+
+#if BITS_PER_LONG == 64
+ x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
+ interval <<= 16;
+#endif
+
+ while (x < (1UL << (BITS_PER_LONG - 2))) {
+ x <<= 2;
+ interval <<= 1;
+ }
+ do_div(interval, int_sqrt(x));
+ return (u32)interval;
+}
+
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 count)
+{
+ return t + codel_inv_sqrt(interval, count);
+}
+
+
+static bool codel_should_drop(struct sk_buff *skb,
+ unsigned int *backlog,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+ *backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+ stats->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= stats->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+ struct Qdisc *sch);
+
+static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_dequeue_t dequeue_func,
+ u32 *backlog)
+{
+ struct sk_buff *skb = dequeue_func(vars, sch);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ if (++vars->count == 0) /* avoid zero divides */
+ vars->count = ~0U;
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ goto end;
+ }
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, sch);
+ if (!codel_should_drop(skb, backlog,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->count);
+ }
+ }
+ }
+ } else if (drop) {
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, sch);
+ drop = codel_should_drop(skb, backlog, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+ /*
+ * if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ if (codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = (vars->count - vars->lastcount) | 1;
+ } else {
+ vars->count = 1;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->count);
+ }
+end:
+ return skb;
+}
+#endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f8..fadd252 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,17 @@ config NET_SCH_QFQ
If unsure, say N.
+config NET_SCH_CODEL
+ tristate "Controlled Delay AQM (CODEL)"
+ help
+ Say Y here if you want to use the Controlled Delay (CODEL)
+ packet scheduling algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_codel.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2..30fab03 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 0000000..a96e95a
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,275 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ * Implemented on linux by :
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+ struct codel_params params;
+ struct codel_vars vars;
+ struct codel_stats stats;
+ u32 drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
+ dequeue, &sch->qstats.backlog);
+ /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+ if (q->stats.drop_count && sch->q.qlen) {
+ qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ q->stats.drop_count = 0;
+ }
+ if (skb)
+ qdisc_bstats_update(sch, skb);
+ return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct codel_sched_data *q;
+
+ if (likely(qdisc_qlen(sch) < sch->limit)) {
+ codel_set_enqueue_time(skb);
+ return qdisc_enqueue_tail(skb, sch);
+ }
+ q = qdisc_priv(sch);
+ q->drop_overlimit++;
+ return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ [TCA_CODEL_TARGET] = { .type = NLA_U32 },
+ [TCA_CODEL_LIMIT] = { .type = NLA_U32 },
+ [TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
+ [TCA_CODEL_ECN] = { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+ unsigned int qlen;
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ if (tb[TCA_CODEL_TARGET]) {
+ u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+ q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_INTERVAL]) {
+ u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+ q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
+ if (tb[TCA_CODEL_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+ if (tb[TCA_CODEL_ECN])
+ q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+ qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ sch->limit = DEFAULT_CODEL_LIMIT;
+
+ codel_params_init(&q->params);
+ codel_vars_init(&q->vars);
+ codel_stats_init(&q->stats);
+
+ if (opt) {
+ int err = codel_change(sch, opt);
+
+ if (err)
+ return err;
+ }
+
+ if (sch->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CODEL_TARGET,
+ codel_time_to_us(q->params.target)) ||
+ nla_put_u32(skb, TCA_CODEL_LIMIT,
+ sch->limit) ||
+ nla_put_u32(skb, TCA_CODEL_INTERVAL,
+ codel_time_to_us(q->params.interval)) ||
+ nla_put_u32(skb, TCA_CODEL_ECN,
+ q->params.ecn))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ const struct codel_sched_data *q = qdisc_priv(sch);
+ struct tc_codel_xstats st = {
+ .maxpacket = q->stats.maxpacket,
+ .count = q->vars.count,
+ .lastcount = q->vars.lastcount,
+ .drop_overlimit = q->drop_overlimit,
+ .ldelay = codel_time_to_us(q->vars.ldelay),
+ .dropping = q->vars.dropping,
+ .ecn_mark = q->stats.ecn_mark,
+ };
+
+ if (q->vars.dropping) {
+ codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+ if (delta >= 0)
+ st.drop_next = codel_time_to_us(delta);
+ else
+ st.drop_next = -codel_time_to_us(-delta);
+ }
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+ struct codel_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+ .id = "codel",
+ .priv_size = sizeof(struct codel_sched_data),
+
+ .enqueue = codel_qdisc_enqueue,
+ .dequeue = codel_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = codel_init,
+ .reset = codel_reset,
+ .change = codel_change,
+ .dump = codel_dump,
+ .dump_stats = codel_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+ return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+ unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v13 net-next] codel: Controlled Delay AQM
2012-05-10 17:51 ` [Codel] [PATCH v13 net-next] " Eric Dumazet
@ 2012-05-11 3:36 ` David Miller
2012-05-11 6:01 ` Eric Dumazet
0 siblings, 1 reply; 19+ messages in thread
From: David Miller @ 2012-05-11 3:36 UTC (permalink / raw)
To: eric.dumazet
Cc: dave.taht, netdev, mattmathis, codel, ycheng, bloat, shemminger
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 10 May 2012 19:51:25 +0200
> From: Eric Dumazet <edumazet@google.com>
>
> An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson.
...
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Signed-off-by: Dave Taht <dave.taht@bufferbloat.net>
Applied, but there was a lot of trailing whitespace and a few
comments mis-formatted which I had to fix up.
Thanks.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v13 net-next] codel: Controlled Delay AQM
2012-05-11 3:36 ` David Miller
@ 2012-05-11 6:01 ` Eric Dumazet
2012-05-14 17:35 ` Simon Barber
0 siblings, 1 reply; 19+ messages in thread
From: Eric Dumazet @ 2012-05-11 6:01 UTC (permalink / raw)
To: David Miller
Cc: dave.taht, netdev, mattmathis, codel, ycheng, bloat, shemminger
On Thu, 2012-05-10 at 23:36 -0400, David Miller wrote:
> Applied, but there was a lot of trailing whitespace and a few
> comments mis-formatted which I had to fix up.
>
> Thanks.
Arg, Stephen told me that too but I didn't have a chance to send a v14 ;)
Thanks !
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [PATCH v13 net-next] codel: Controlled Delay AQM
2012-05-11 6:01 ` Eric Dumazet
@ 2012-05-14 17:35 ` Simon Barber
2012-05-14 20:08 ` [Codel] [Bloat] " Dave Hart
0 siblings, 1 reply; 19+ messages in thread
From: Simon Barber @ 2012-05-14 17:35 UTC (permalink / raw)
To: Eric Dumazet; +Cc: bloat, codel, dave.taht
Any chance of getting this patch released under a BSD license? I'd like
to propose that my friend at Broadcom use it for their DSL modem chipsets.
Simon
On 05/10/2012 11:01 PM, Eric Dumazet wrote:
> On Thu, 2012-05-10 at 23:36 -0400, David Miller wrote:
>
>> Applied, but there was a lot of trailing whitespace and a few
>> comments mis-formatted which I had to fix up.
>>
>> Thanks.
>
> Arg, Stephen told me that too but I didnt have a chance to send a v14 ;)
>
> Thanks !
>
>
> _______________________________________________
> Codel mailing list
> Codel@lists.bufferbloat.net
> https://lists.bufferbloat.net/listinfo/codel
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [Codel] [Bloat] [PATCH v13 net-next] codel: Controlled Delay AQM
2012-05-14 17:35 ` Simon Barber
@ 2012-05-14 20:08 ` Dave Hart
0 siblings, 0 replies; 19+ messages in thread
From: Dave Hart @ 2012-05-14 20:08 UTC (permalink / raw)
To: Simon Barber; +Cc: codel, bloat
On Mon, May 14, 2012 at 5:35 PM, Simon Barber <simon@superduper.net> wrote:
> Any chance of getting this patch released under a BSD license? I'd like to
> propose my friend in Broadcom use it for their DSL modem chipsets.
It's already dual-licensed BSD/GPL 2:
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
Cheers,
Dave Hart
^ permalink raw reply [flat|nested] 19+ messages in thread
end of thread, other threads:[~2012-05-14 20:09 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-05-07 5:35 [Codel] [PATCH v9] codel: Controlled Delay AQM Dave Täht
2012-05-07 5:50 ` Eric Dumazet
2012-05-07 5:52 ` Dave Taht
2012-05-07 5:51 ` Dave Taht
2012-05-07 13:57 ` [Codel] [PATCH v10] " Eric Dumazet
2012-05-07 16:07 ` [Codel] [PATCH v1 ] sfq: add a Controlled Delay option Eric Dumazet
2012-05-09 13:50 ` [Codel] [PATCH v12] codel: Controlled Delay AQM Eric Dumazet
2012-05-09 13:54 ` [Codel] [PATCH v12 iproute2] " Eric Dumazet
2012-05-09 15:47 ` [Codel] [PATCH v12] " Dave Taht
2012-05-09 15:54 ` Eric Dumazet
2012-05-09 16:08 ` Dave Taht
2012-05-09 16:14 ` Eric Dumazet
2012-05-09 16:32 ` [Codel] [Bloat] " Josh Hunt
2012-05-09 16:37 ` Eric Dumazet
2012-05-10 17:51 ` [Codel] [PATCH v13 net-next] " Eric Dumazet
2012-05-11 3:36 ` David Miller
2012-05-11 6:01 ` Eric Dumazet
2012-05-14 17:35 ` Simon Barber
2012-05-14 20:08 ` [Codel] [Bloat] " Dave Hart
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox