[Cake] [PATCH net-next v7 7/7] sch_cake: Conditionally split GSO segments

Toke Høiland-Jørgensen toke at toke.dk
Wed May 2 11:11:33 EDT 2018


At lower bandwidths, the transmission time of a single GSO segment can add
an unacceptable amount of latency due to head-of-line (HOL) blocking.
Furthermore, with a software shaper, any tuning mechanism the kernel
employs to control the maximum size of GSO segments is thrown off by the
artificial limit on bandwidth. For this reason, we split GSO segments into
their individual packets if, and only if, the shaper is active and
configured to a bandwidth <= 1 Gbps.

Signed-off-by: Toke Høiland-Jørgensen <toke at toke.dk>
---
 net/sched/sch_cake.c |   95 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 69 insertions(+), 26 deletions(-)
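
As a back-of-the-envelope illustration of the latency claim (my numbers,
not from the patch): a full-size ~64 KB GSO super-packet serialises in
about half a second at 1 Mbit/s but in about half a millisecond at
1 Gbit/s, which is where the threshold below is drawn. A standalone C
sketch of that arithmetic, assuming 64 KB as the upper bound on GSO
super-packet size:

/* Sketch only: serialization delay of one ~64 KB GSO super-packet at a
 * few shaped rates. The 64 KB figure is an assumption, not taken from
 * the patch.
 */
#include <stdio.h>

int main(void)
{
	const double gso_bytes = 65536.0;
	const double rates_bps[] = { 1e6, 10e6, 100e6, 1e9 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%10.0f bit/s: %8.3f ms\n", rates_bps[i],
		       gso_bytes * 8.0 * 1000.0 / rates_bps[i]);
	return 0;
}

This prints roughly 524 ms, 52 ms, 5.2 ms and 0.52 ms respectively, so a
single super-packet dominates the queue at sub-gigabit rates.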

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index cfc094356f9f..54bdf3022c75 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -81,6 +81,7 @@
 #define CAKE_QUEUES (1024)
 #define CAKE_FLOW_MASK 63
 #define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
 #define US2TIME(a) (a * (u64)NSEC_PER_USEC)
 
 typedef u64 cobalt_time_t;
@@ -1530,36 +1531,73 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (unlikely(len > b->max_skblen))
 		b->max_skblen = len;
 
-	cobalt_set_enqueue_time(skb, now);
-	get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
-	flow_queue_add(flow, skb);
-
-	if (q->ack_filter)
-		ack = cake_ack_filter(q, flow);
+	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+		struct sk_buff *segs, *nskb;
+		netdev_features_t features = netif_skb_features(skb);
+		unsigned int slen = 0;
+
+		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+		if (IS_ERR_OR_NULL(segs))
+			return qdisc_drop(skb, sch, to_free);
+
+		while (segs) {
+			nskb = segs->next;
+			segs->next = NULL;
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			cobalt_set_enqueue_time(segs, now);
+			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+									  segs);
+			flow_queue_add(flow, segs);
+
+			sch->q.qlen++;
+			slen += segs->len;
+			q->buffer_used += segs->truesize;
+			b->packets++;
+			segs = nskb;
+		}
 
-	if (ack) {
-		b->ack_drops++;
-		sch->qstats.drops++;
-		b->bytes += qdisc_pkt_len(ack);
-		len -= qdisc_pkt_len(ack);
-		q->buffer_used += skb->truesize - ack->truesize;
-		if (q->rate_flags & CAKE_FLAG_INGRESS)
-			cake_advance_shaper(q, b, ack, now, true);
+		/* stats */
+		b->bytes	    += slen;
+		b->backlogs[idx]    += slen;
+		b->tin_backlog      += slen;
+		sch->qstats.backlog += slen;
+		q->avg_window_bytes += slen;
 
-		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
-		consume_skb(ack);
+		qdisc_tree_reduce_backlog(sch, 1, len);
+		consume_skb(skb);
 	} else {
-		sch->q.qlen++;
-		q->buffer_used      += skb->truesize;
-	}
+		/* not splitting */
+		cobalt_set_enqueue_time(skb, now);
+		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+		flow_queue_add(flow, skb);
+
+		if (q->ack_filter)
+			ack = cake_ack_filter(q, flow);
+
+		if (ack) {
+			b->ack_drops++;
+			sch->qstats.drops++;
+			b->bytes += qdisc_pkt_len(ack);
+			len -= qdisc_pkt_len(ack);
+			q->buffer_used += skb->truesize - ack->truesize;
+			if (q->rate_flags & CAKE_FLAG_INGRESS)
+				cake_advance_shaper(q, b, ack, now, true);
+
+			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+			consume_skb(ack);
+		} else {
+			sch->q.qlen++;
+			q->buffer_used      += skb->truesize;
+		}
 
-	/* stats */
-	b->packets++;
-	b->bytes	    += len;
-	b->backlogs[idx]    += len;
-	b->tin_backlog      += len;
-	sch->qstats.backlog += len;
-	q->avg_window_bytes += len;
+		/* stats */
+		b->packets++;
+		b->bytes	    += len;
+		b->backlogs[idx]    += len;
+		b->tin_backlog      += len;
+		sch->qstats.backlog += len;
+		q->avg_window_bytes += len;
+	}
 
 	if (q->overflow_timeout)
 		cake_heapify_up(q, b->overflow_idx[idx]);
@@ -2367,6 +2405,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
+	if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD)
+		q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+	else
+		q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+
 	if (q->tins) {
 		sch_tree_lock(sch);
 		cake_reconfigure(sch);
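
A note on units for the threshold above (my annotation, not part of the
patch): tc encodes rates on its netlink interface in bytes per second, so
q->rate_bps, despite its name, holds bytes/s, and 125,000,000 bytes/s is
1 Gbit/s as the #define's comment says. A minimal C check of that
conversion, assuming the tc bytes-per-second convention:

/* Assumed convention: rates arrive from tc in bytes per second. */
#include <stdio.h>

int main(void)
{
	const unsigned long long one_gbit = 1000000000ULL; /* bit/s */

	/* 1 Gbit/s expressed in bytes/s -- matches the #define above. */
	printf("%llu\n", one_gbit / 8); /* prints 125000000 */
	return 0;
}

Since cake_change() re-evaluates the condition on every configuration
change, raising the bandwidth above the threshold, or removing the shaper
entirely (rate_bps == 0), clears CAKE_FLAG_SPLIT_GSO again for
subsequently enqueued packets.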


