[Cake] [PATCH net-next v16 2/8] sch_cake: Add ingress mode
Toke Høiland-Jørgensen
toke at toke.dk
Mon May 28 10:27:46 EDT 2018
The ingress mode is meant to be enabled when CAKE runs downlink of the
actual bottleneck (such as on an IFB device). In this mode the shaper also
accounts dropped packets towards the shaped rate, as these have already
traversed the bottleneck.
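As an illustration, the accounting can be pictured as a virtual shaper
clock that every packet which occupied the bottleneck pushes forward,
whether it was delivered or dropped. A toy sketch follows; this is not the
in-tree cake_advance_shaper() helper, and the struct and field names are
invented for illustration only:

	#include <linux/types.h>

	/* Toy model: the shaper keeps a "next packet" timestamp that each
	 * packet advances by its serialization time at the shaped rate.
	 */
	struct toy_shaper {
		u64 time_next_packet_ns;	/* earliest release time of the next packet */
		u64 rate_ns_per_byte;		/* serialization cost at the shaped rate */
	};

	static void toy_charge_packet(struct toy_shaper *s, u32 len)
	{
		/* Egress shaping charges only transmitted packets; in
		 * ingress mode a dropped packet is charged too, since it
		 * already crossed the real bottleneck before reaching the
		 * qdisc.
		 */
		s->time_next_packet_ns += (u64)len * s->rate_ns_per_byte;
	}

In the patch below this corresponds to calling cake_advance_shaper() on the
drop paths whenever CAKE_FLAG_INGRESS is set.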
Enabling ingress mode will also tune the AQM to always keep at least two
packets queued *for each flow*. This is done by scaling the minimum queue
occupancy level that will disable the AQM by the number of active bulk
flows. The rationale for this is that retransmits are more expensive in
ingress mode, since dropped packets have to traverse the bottleneck again
when they are retransmitted; thus, being more lenient and keeping a minimum
number of packets queued will improve throughput in cases where the number
of active flows is so large that they saturate the bottleneck even at
their minimum window size.
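To make the scaling concrete, this is the resulting condition in
cobalt_should_drop() (from the hunk below), with a worked example in the
comment; the 1 ms mtu_time is hypothetical, the constants are from the
patch:

	/* bulk_flows is passed as 0 in egress mode, so the middle term
	 * vanishes there. With a hypothetical mtu_time of 1 ms and 16
	 * active bulk flows, the AQM only engages once the sojourn time
	 * exceeds max(target, 2 * 16 * 1 ms, 4 * 1 ms) = 32 ms, i.e.
	 * roughly two full-size packets queued per flow.
	 */
	over_target = sojourn > p->target &&
		      sojourn > p->mtu_time * bulk_flows * 2 &&
		      sojourn > p->mtu_time * 4;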
This commit also adds a separate switch to enable ingress mode rate
autoscaling. If enabled, the autoscaling code will observe the actual
traffic rate and adjust the shaper rate to match it. This can help avoid
latency increases in the case where the actual bottleneck rate decreases
below the shaped rate. The autoscaling filters out spikes with an EWMA
filter.
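cake_ewma() itself is a pre-existing helper and not part of this hunk. A
minimal sketch of a shift-based EWMA of this kind (the in-tree helper may
differ in detail):

	#include <linux/types.h>

	/* Shift-based EWMA: avg moves towards sample by roughly
	 * (sample - avg) / 2^shift per step. A small shift (2) reacts
	 * quickly, a large shift (8) decays slowly, matching the
	 * asymmetric 2 : 8 choice in the enqueue hunk below, which tracks
	 * rate increases fast and rate decreases slowly.
	 */
	static u64 ewma_sketch(u64 avg, u64 sample, u32 shift)
	{
		avg -= avg >> shift;
		avg += sample >> shift;
		return avg;
	}

The peak-bandwidth estimate is then applied with a margin: the enqueue hunk
sets the shaper rate to 15/16 (about 94%) of the estimated peak, and
reconfigures at most once every 250 ms.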
Signed-off-by: Toke Høiland-Jørgensen <toke at toke.dk>
---
net/sched/sch_cake.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 81 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7ea4aa261cec..10e208e4255d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -435,7 +435,8 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
static bool cobalt_should_drop(struct cobalt_vars *vars,
struct cobalt_params *p,
ktime_t now,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 bulk_flows)
{
bool next_due, over_target, drop = false;
ktime_t schedule;
@@ -459,6 +460,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
schedule = ktime_sub(now, vars->drop_next);
over_target = sojourn > p->target &&
+ sojourn > p->mtu_time * bulk_flows * 2 &&
sojourn > p->mtu_time * 4;
next_due = vars->count && ktime_to_ns(schedule) >= 0;
@@ -913,6 +915,9 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
b->tin_dropped++;
sch->qstats.drops++;
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ cake_advance_shaper(q, b, skb, now, true);
+
__qdisc_drop(skb, to_free);
sch->q.qlen--;
@@ -990,8 +995,46 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cake_heapify_up(q, b->overflow_idx[idx]);
/* incoming bandwidth capacity estimate */
- q->avg_window_bytes = 0;
- q->last_packet_time = now;
+ if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+ u64 packet_interval = \
+ ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+ if (packet_interval > NSEC_PER_SEC)
+ packet_interval = NSEC_PER_SEC;
+
+ /* filter out short-term bursts, eg. wifi aggregation */
+ q->avg_packet_interval = \
+ cake_ewma(q->avg_packet_interval,
+ packet_interval,
+ (packet_interval > q->avg_packet_interval ?
+ 2 : 8));
+
+ q->last_packet_time = now;
+
+ if (packet_interval > q->avg_packet_interval) {
+ u64 window_interval = \
+ ktime_to_ns(ktime_sub(now,
+ q->avg_window_begin));
+ u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+ do_div(b, window_interval);
+ q->avg_peak_bandwidth =
+ cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8);
+ q->avg_window_bytes = 0;
+ q->avg_window_begin = now;
+
+ if (ktime_after(now,
+ ktime_add_ms(q->last_reconfig_time,
+ 250))) {
+ q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+ cake_reconfigure(sch);
+ }
+ }
+ } else {
+ q->avg_window_bytes = 0;
+ q->last_packet_time = now;
+ }
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
@@ -1251,15 +1294,27 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
/* Last packet in queue may be marked, shouldn't be dropped */
- if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb) ||
+ if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+ (b->bulk_flow_count *
+ !!(q->rate_flags &
+ CAKE_FLAG_INGRESS))) ||
!flow->head)
break;
+ /* drop this packet, get another one */
+ if (q->rate_flags & CAKE_FLAG_INGRESS) {
+ len = cake_advance_shaper(q, b, skb,
+ now, true);
+ flow->deficit -= len;
+ b->tin_deficit -= len;
+ }
flow->dropped++;
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
kfree_skb(skb);
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ goto retry;
}
b->tin_ecn_mark += !!flow->cvars.ecn_marked;
@@ -1442,6 +1497,20 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
q->target = 1;
}
+ if (tb[TCA_CAKE_AUTORATE]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
+ q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+ else
+ q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+ }
+
+ if (tb[TCA_CAKE_INGRESS]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
+ q->rate_flags |= CAKE_FLAG_INGRESS;
+ else
+ q->rate_flags &= ~CAKE_FLAG_INGRESS;
+ }
+
if (tb[TCA_CAKE_MEMORY])
q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
@@ -1565,6 +1634,14 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
+ !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_CAKE_INGRESS,
+ !!(q->rate_flags & CAKE_FLAG_INGRESS)))
+ goto nla_put_failure;
+
return nla_nest_end(skb, opts);
nla_put_failure: