[Cake] [PATCH] Keep an internal counter for total queue length

Toke Høiland-Jørgensen toke at toke.dk
Fri Jul 6 07:57:45 EDT 2018


Since sch->q.qlen can be changed from outside cake, we can't rely on it for
breaking out of loops etc. So keep an internal counter that mirrors it and use
that for all checks.

This fixes the issue with infinite loops on multi-queue hardware.

Signed-off-by: Toke Høiland-Jørgensen <toke at toke.dk>
---
 sch_cake.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
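
Side note (not part of the patch itself): the hunks below simply mirror every
sch->q.qlen update into q->tot_qlen and switch the emptiness checks over to
the mirror. The little standalone program below is only meant to illustrate
why terminating on a privately-owned counter matters when the shared one can
be bumped behind the qdisc's back; all names in it (fake_qdisc, fake_enqueue,
fake_dequeue, shared_qlen, internal_qlen) are made up for the example and do
not appear in sch_cake.

/* Standalone illustration: a private mirror counter keeps the drain loop
 * bounded even if the shared counter is modified by someone else.
 */
#include <stdio.h>

struct fake_qdisc {
	unsigned int shared_qlen;   /* like sch->q.qlen: others may touch it */
	unsigned int internal_qlen; /* like q->tot_qlen: only we touch it */
};

static void fake_enqueue(struct fake_qdisc *q)
{
	q->shared_qlen++;
	q->internal_qlen++;
}

static int fake_dequeue(struct fake_qdisc *q)
{
	/* Terminate on the internal counter, never the shared one. */
	if (!q->internal_qlen)
		return -1;
	q->shared_qlen--;
	q->internal_qlen--;
	return 0;
}

int main(void)
{
	struct fake_qdisc q = { 0, 0 };
	int drained = 0;

	fake_enqueue(&q);
	fake_enqueue(&q);

	/* Simulate another layer bumping the shared counter: a loop gated on
	 * !q.shared_qlen would now spin past the point where the real queue
	 * is empty, while the internal counter still stops it after two
	 * packets.
	 */
	q.shared_qlen += 5;

	while (fake_dequeue(&q) == 0)
		drained++;

	printf("drained %d packets, shared counter left at %u\n",
	       drained, q.shared_qlen);
	return 0;
}
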

diff --git a/sch_cake.c b/sch_cake.c
index ec0b0f2..3667741 100644
--- a/sch_cake.c
+++ b/sch_cake.c
@@ -233,6 +233,10 @@ struct cake_sched_data {
 	u32		buffer_limit;
 	u32		buffer_config_limit;
 
+	/* we need an internal counter for total qlen since sch->q.qlen can be
+	 * modified by other parts of the qdisc infrastructure */
+	u32		tot_qlen;
+
 	/* indices for dequeue */
 	u16		cur_tin;
 	u16		cur_flow;
@@ -1523,6 +1527,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 	__qdisc_drop(skb, to_free);
 	sch->q.qlen--;
+	q->tot_qlen--;
 
 	cake_heapify(q, 0);
 
@@ -1664,7 +1669,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		if (ktime_before(b->time_next_packet, now))
 			b->time_next_packet = now;
 
-		if (!sch->q.qlen) {
+		if (!q->tot_qlen) {
 			if (ktime_before(q->time_next_packet, now)) {
 				q->failsafe_next_packet = now;
 				q->time_next_packet = now;
@@ -1702,6 +1707,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			flow_queue_add(flow, segs);
 
 			sch->q.qlen++;
+			q->tot_qlen++;
 			slen += segs->len;
 			q->buffer_used += segs->truesize;
 			b->packets++;
@@ -1739,6 +1745,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			consume_skb(ack);
 		} else {
 			sch->q.qlen++;
+			q->tot_qlen++;
 			q->buffer_used      += skb->truesize;
 		}
 
@@ -1859,6 +1866,7 @@ static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
 		sch->qstats.backlog      -= len;
 		q->buffer_used		 -= skb->truesize;
 		sch->q.qlen--;
+		q->tot_qlen--;
 
 		if (q->overflow_timeout)
 			cake_heapify(q, b->overflow_idx[q->cur_flow]);
@@ -1893,7 +1901,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	u32 len;
 
 begin:
-	if (!sch->q.qlen)
+	if (!q->tot_qlen)
 		return NULL;
 
 	/* global hard shaper */
@@ -2092,12 +2100,12 @@ retry:
 	flow->deficit -= len;
 	b->tin_deficit -= len;
 
-	if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
+	if (ktime_after(q->time_next_packet, now) && q->tot_qlen) {
 		u64 next = min(ktime_to_ns(q->time_next_packet),
 			       ktime_to_ns(q->failsafe_next_packet));
 
 		qdisc_watchdog_schedule_ns(&q->watchdog, next);
-	} else if (!sch->q.qlen) {
+	} else if (!q->tot_qlen) {
 		int i;
 
 		for (i = 0; i < q->tin_cnt; i++) {
-- 
2.7.4


