From: "Toke Høiland-Jørgensen" <toke@toke.dk>
To: netdev@vger.kernel.org
Cc: cake@lists.bufferbloat.net
Subject: [Cake] [PATCH net-next v19 2/8] sch_cake: Add ingress mode
Date: Fri, 06 Jul 2018 17:37:19 +0200
Message-ID: <153089143921.14813.10415962798599558785.stgit@alrua-x1>
In-Reply-To: <153089141290.14813.1010951705929696896.stgit@alrua-x1>

The ingress mode is meant to be enabled when CAKE runs on the downlink
side of the actual bottleneck (such as on an IFB device). This mode
changes the shaper to also account dropped packets against the shaped
rate, since those packets have already traversed the bottleneck by the
time they are dropped.
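
Conceptually, the drop-path change amounts to the following (a condensed
sketch of the cake_drop()/cake_dequeue() hunks below, not additional
code):

	/* In ingress mode a dropped packet has already consumed
	 * bottleneck capacity, so charge it to the shaper anyway.
	 */
	if (q->rate_flags & CAKE_FLAG_INGRESS)
		cake_advance_shaper(q, b, skb, now, true);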

Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.
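
In code terms, the scaling adds one clause to the AQM's "over target"
check (this mirrors the cobalt_should_drop() hunk below; bulk_flows is
passed as 0 when ingress mode is off, which makes the new clause a
no-op):

	over_target = sojourn > p->target &&
		      sojourn > p->mtu_time * bulk_flows * 2 &&
		      sojourn > p->mtu_time * 4;

As an illustrative (not measured) example: at a 1 Gbit/s shaped rate an
MTU-sized packet serializes in roughly 12 us, so with 100 bulk flows the
AQM only engages once the sojourn time exceeds 100 * 2 * 12 us = 2.4 ms,
i.e. about two MTU-sized packets' worth of queue per flow.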

This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases when the actual bottleneck rate drops below the shaped
rate. The autoscaler filters out short-term spikes with an EWMA filter.
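
For reference, cake_ewma() (introduced in patch 1 of this series) is a
shift-based moving average along these lines, where a smaller shift
weights new samples more heavily:

	static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
	{
		avg -= avg >> shift;
		avg += sample >> shift;
		return avg;
	}

The shifts are asymmetric (2 when the sample exceeds the average, 8
otherwise), so each estimate reacts quickly to samples above it and only
slowly to samples below it. The shaper rate is then set to 15/16 of the
averaged peak bandwidth, re-evaluated at most once every 250 ms (see the
cake_enqueue() hunk below).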

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---
 net/sched/sch_cake.c |   87 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 83 insertions(+), 4 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index ea0272615d63..2950a8d07887 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
 static bool cobalt_should_drop(struct cobalt_vars *vars,
 			       struct cobalt_params *p,
 			       ktime_t now,
-			       struct sk_buff *skb)
+			       struct sk_buff *skb,
+			       u32 bulk_flows)
 {
 	bool next_due, over_target, drop = false;
 	ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
 	sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
 	schedule = ktime_sub(now, vars->drop_next);
 	over_target = sojourn > p->target &&
+		      sojourn > p->mtu_time * bulk_flows * 2 &&
 		      sojourn > p->mtu_time * 4;
 	next_due = vars->count && ktime_to_ns(schedule) >= 0;
 
@@ -881,6 +883,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	b->tin_dropped++;
 	sch->qstats.drops++;
 
+	if (q->rate_flags & CAKE_FLAG_INGRESS)
+		cake_advance_shaper(q, b, skb, now, true);
+
 	__qdisc_drop(skb, to_free);
 	sch->q.qlen--;
 
@@ -921,6 +926,8 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t,
 	return 0;
 }
 
+static void cake_reconfigure(struct Qdisc *sch);
+
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff **to_free)
 {
@@ -988,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		cake_heapify_up(q, b->overflow_idx[idx]);
 
 	/* incoming bandwidth capacity estimate */
-	q->avg_window_bytes = 0;
-	q->last_packet_time = now;
+	if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+		u64 packet_interval =
+			ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+		if (packet_interval > NSEC_PER_SEC)
+			packet_interval = NSEC_PER_SEC;
+
+		/* filter out short-term bursts, e.g. wifi aggregation */
+		q->avg_packet_interval =
+			cake_ewma(q->avg_packet_interval,
+				  packet_interval,
+				  (packet_interval > q->avg_packet_interval ?
+					  2 : 8));
+
+		q->last_packet_time = now;
+
+		if (packet_interval > q->avg_packet_interval) {
+			u64 window_interval =
+				ktime_to_ns(ktime_sub(now,
+						      q->avg_window_begin));
+			u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+			do_div(b, window_interval);
+			q->avg_peak_bandwidth =
+				cake_ewma(q->avg_peak_bandwidth, b,
+					  b > q->avg_peak_bandwidth ? 2 : 8);
+			q->avg_window_bytes = 0;
+			q->avg_window_begin = now;
+
+			if (ktime_after(now,
+					ktime_add_ms(q->last_reconfig_time,
+						     250))) {
+				q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+				cake_reconfigure(sch);
+			}
+		}
+	} else {
+		q->avg_window_bytes = 0;
+		q->last_packet_time = now;
+	}
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1268,15 +1313,27 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 		}
 
 		/* Last packet in queue may be marked, shouldn't be dropped */
-		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) ||
+		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+					(b->bulk_flow_count *
+					 !!(q->rate_flags &
+					    CAKE_FLAG_INGRESS))) ||
 		    !flow->head)
 			break;
 
+		/* drop this packet, get another one */
+		if (q->rate_flags & CAKE_FLAG_INGRESS) {
+			len = cake_advance_shaper(q, b, skb,
+						  now, true);
+			flow->deficit -= len;
+			b->tin_deficit -= len;
+		}
 		flow->dropped++;
 		b->tin_dropped++;
 		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
 		qdisc_qstats_drop(sch);
 		kfree_skb(skb);
+		if (q->rate_flags & CAKE_FLAG_INGRESS)
+			goto retry;
 	}
 
 	b->tin_ecn_mark += !!flow->cvars.ecn_marked;
@@ -1459,6 +1516,20 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 			q->target = 1;
 	}
 
+	if (tb[TCA_CAKE_AUTORATE]) {
+		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
+			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+		else
+			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+	}
+
+	if (tb[TCA_CAKE_INGRESS]) {
+		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
+			q->rate_flags |= CAKE_FLAG_INGRESS;
+		else
+			q->rate_flags &= ~CAKE_FLAG_INGRESS;
+	}
+
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
@@ -1582,6 +1653,14 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
+			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
+			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:

