Cake - FQ_codel the next generation
* [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
@ 2025-01-07 12:01 Toke Høiland-Jørgensen
  2025-01-08 16:10 ` Dave Taht
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2025-01-07 12:01 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen, Jamal Hadi Salim, Cong Wang,
	Jiri Pirko, Paolo Abeni
  Cc: Toke Høiland-Jørgensen, syzbot+f63600d288bfb7057424,
	David S. Miller, Eric Dumazet, Jakub Kicinski, Simon Horman,
	cake, netdev

Even though we fixed a logic error in the commit cited below, syzbot
still managed to trigger an underflow of the per-host bulk flow
counters, leading to an out-of-bounds memory access (the underflowed
u16 counter wraps to 65535 and is then used as an index into the
quantum_div[] array, which only has CAKE_QUEUES + 1 entries).

To prevent any such logic errors from causing out-of-bounds memory
accesses, this commit factors out all accesses to the per-host bulk
flow counters into a series of helpers that perform bounds checking
before any increment or decrement. This also improves readability: the
conditional checks for the flow mode now live in these helpers instead
of being spread throughout the code (which was the cause of the
original logic error).
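
In essence, each helper is a clamped counter update. A minimal
userspace sketch of the pattern (the bound and names are illustrative,
not the kernel's):

	#define MAX_COUNT 1024	/* stands in for CAKE_QUEUES */

	/* Only increment while below the bound, so the counter can
	 * never grow past the number of queues.
	 */
	static void counted_inc(unsigned short *count)
	{
		if (*count < MAX_COUNT)
			(*count)++;
	}

	/* Only decrement while non-zero, so a stray extra decrement
	 * cannot wrap the u16 around to 65535 and later be used as an
	 * out-of-bounds array index.
	 */
	static void counted_dec(unsigned short *count)
	{
		if (*count)
			(*count)--;
	}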

v2:
- Remove now-unused srchost and dsthost local variables in cake_dequeue()

Fixes: 546ea84d07e3 ("sched: sch_cake: fix bulk flow accounting logic for host fairness")
Reported-by: syzbot+f63600d288bfb7057424@syzkaller.appspotmail.com
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
 net/sched/sch_cake.c | 140 +++++++++++++++++++++++--------------------
 1 file changed, 75 insertions(+), 65 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 8d8b2db4653c..2c2e2a67f3b2 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
@@ -773,10 +830,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		allocate_dst = cake_ddst(flow_mode);
 
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 		/* reserve queue for future packets in same flow */
@@ -801,9 +856,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 
 		if (allocate_dst) {
@@ -824,9 +880,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 	}
 
@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1852,18 +1905,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
@@ -1871,12 +1914,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2039,11 +2076,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	q->cur_flow = flow - b->flows;
 	first_flow = false;
 
-	/* triple isolation (modified DRR++) */
-	srchost = &b->hosts[flow->srchost];
-	dsthost = &b->hosts[flow->dsthost];
-	host_load = 1;
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2087,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count++;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count++;
+				cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				flow->set = CAKE_SET_BULK;
 			} else {
@@ -2071,19 +2100,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			}
 		}
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		WARN_ON(host_load > CAKE_QUEUES);
-
-		/* The get_random_u16() is a way to apply dithering to avoid
-		 * accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  get_random_u16()) >> 16;
+		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
@@ -2107,11 +2124,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 					b->decaying_flow_count++;
 				} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2143,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				else if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
 
-					if (cake_dsrc(q->flow_mode))
-						srchost->srchost_bulk_flow_count--;
-
-					if (cake_ddst(q->flow_mode))
-						dsthost->dsthost_bulk_flow_count--;
-
+					cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+					cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 				} else
 					b->decaying_flow_count--;
 
-- 
2.47.1



* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-07 12:01 [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts Toke Høiland-Jørgensen
@ 2025-01-08 16:10 ` Dave Taht
  2025-01-09 12:11 ` Paolo Abeni
  2025-01-09 16:30 ` patchwork-bot+netdevbpf
  2 siblings, 0 replies; 9+ messages in thread
From: Dave Taht @ 2025-01-08 16:10 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Toke Høiland-Jørgensen, Jamal Hadi Salim, Cong Wang,
	Jiri Pirko, Paolo Abeni, netdev, cake, Eric Dumazet,
	Simon Horman, Jakub Kicinski, syzbot+f63600d288bfb7057424,
	David S. Miller

On Tue, Jan 7, 2025 at 4:01 AM Toke Høiland-Jørgensen via Cake
<cake@lists.bufferbloat.net> wrote:
> [...]

Acked-by: Dave Taht <dave.taht@gmail.com>



-- 
Dave Täht CSO, LibreQos


* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-07 12:01 [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts Toke Høiland-Jørgensen
  2025-01-08 16:10 ` Dave Taht
@ 2025-01-09 12:11 ` Paolo Abeni
  2025-01-09 12:47   ` Toke Høiland-Jørgensen
  2025-01-09 16:30 ` patchwork-bot+netdevbpf
  2 siblings, 1 reply; 9+ messages in thread
From: Paolo Abeni @ 2025-01-09 12:11 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen,
	Toke Høiland-Jørgensen, Jamal Hadi Salim, Cong Wang,
	Jiri Pirko
  Cc: syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Simon Horman, cake, netdev

On 1/7/25 1:01 PM, Toke Høiland-Jørgensen wrote:
> [...]
> 
> v2:
> - Remove now-unused srchost and dsthost local variables in cake_dequeue()

Small nit: the changelog should come after the '---' separator. No need
to repost just for this.

> [...]
> +static u16 cake_get_flow_quantum(struct cake_tin_data *q,
> +				 struct cake_flow *flow,
> +				 int flow_mode)
> +{
> +	u16 host_load = 1;
> +
> +	if (cake_dsrc(flow_mode))
> +		host_load = max(host_load,
> +				q->hosts[flow->srchost].srchost_bulk_flow_count);
> +
> +	if (cake_ddst(flow_mode))
> +		host_load = max(host_load,
> +				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
> +
> +	/* The get_random_u16() is a way to apply dithering to avoid
> +	 * accumulating roundoff errors
> +	 */
> +	return (q->flow_quantum * quantum_div[host_load] +
> +		get_random_u16()) >> 16;

dithering is now applied on both enqueue and dequeue, while prior to
this patch it only happened on dequeue. Is that intentional? can't lead
to (small) flow_deficit increase?

Thanks!

Paolo



* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-09 12:11 ` Paolo Abeni
@ 2025-01-09 12:47   ` Toke Høiland-Jørgensen
  2025-01-09 15:06     ` Paolo Abeni
  0 siblings, 1 reply; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2025-01-09 12:47 UTC (permalink / raw)
  To: Paolo Abeni, Jamal Hadi Salim, Cong Wang, Jiri Pirko
  Cc: syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Simon Horman, cake, netdev

Paolo Abeni <pabeni@redhat.com> writes:

> On 1/7/25 1:01 PM, Toke Høiland-Jørgensen wrote:
>> [...]
>> 
>> v2:
>> - Remove now-unused srchost and dsthost local variables in cake_dequeue()
>
> Small nit: the changelog should come after the '---' separator. No need
> to repost just for this.

Oh, I was under the impression that we wanted them preserved in the git
log (and hence above the ---). Is that not the case (anymore?)?

>> [...]
>> +	/* The get_random_u16() is a way to apply dithering to avoid
>> +	 * accumulating roundoff errors
>> +	 */
>> +	return (q->flow_quantum * quantum_div[host_load] +
>> +		get_random_u16()) >> 16;
>
> dithering is now applied on both enqueue and dequeue, while prior to
> this patch it only happened on dequeue. Is that intentional? can't lead
> to (small) flow_deficit increase?

Yeah, that was deliberate. The flow quantum is only set on enqueue when
the flow is first initialised as a sparse flow, not for every packet.
The only user-visible effect I can see this having is that the maximum
packet size that can be sent while a flow stays sparse will now vary
with +/- one byte in some cases. I am pretty sure this won't have any
consequence in practice, and I don't think it's worth complicating the
code (with a 'dither' argument to cake_get_flow_quantum(), say) to
preserve the old behaviour.
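
For concreteness, the dithered scaling boils down to something like the
following userspace sketch, taking a 1514-byte quantum and a host load
of 1 as an example (quantum_div[load] is roughly 65535 / load in the
real code, and rand() stands in for get_random_u16()):

	#include <stdio.h>
	#include <stdlib.h>

	/* Plain truncation always rounds down, so the error accumulates
	 * in one direction across DRR rotations.
	 */
	static unsigned int scale_trunc(unsigned int quantum, unsigned int load)
	{
		return (quantum * (65535 / load)) >> 16;
	}

	/* Adding a uniform 16-bit value before the shift makes the
	 * expected result equal the exact quotient, so the roundoff
	 * averages out instead of accumulating.
	 */
	static unsigned int scale_dither(unsigned int quantum, unsigned int load)
	{
		return (quantum * (65535 / load) + (rand() & 0xffff)) >> 16;
	}

	int main(void)
	{
		unsigned long long sum = 0;
		int i;

		for (i = 0; i < 1000000; i++)
			sum += scale_dither(1514, 1);

		/* truncation yields 1513 every time; the dithered mean is
		 * ~1513.98, i.e. 1514 * 65535 / 65536
		 */
		printf("trunc=%u dither mean=%.3f\n",
		       scale_trunc(1514, 1), sum / 1e6);
		return 0;
	}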

I guess I should have mentioned in the commit message that this was
deliberate. Since it seems you'll be editing that anyway (cf the above),
how about adding a paragraph like:

 As part of this change, the flow quantum calculation is consolidated
 into a helper function, which means that the dithering applied to the
 host load scaling is now applied both in the DRR rotation and when a
 sparse flow's quantum is first initiated. The only user-visible effect
 of this is that the maximum packet size that can be sent while a flow
 stays sparse will now vary with +/- one byte in some cases. This should
 not make a noticeable difference in practice, and thus it's not worth
 complicating the code to preserve the old behaviour.

-Toke


* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-09 12:47   ` Toke Høiland-Jørgensen
@ 2025-01-09 15:06     ` Paolo Abeni
  2025-01-09 16:08       ` Toke Høiland-Jørgensen
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Abeni @ 2025-01-09 15:06 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen, Jamal Hadi Salim, Cong Wang,
	Jiri Pirko
  Cc: syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Simon Horman, cake, netdev

On 1/9/25 1:47 PM, Toke Høiland-Jørgensen wrote:
> Paolo Abeni <pabeni@redhat.com> writes:
>> On 1/7/25 1:01 PM, Toke Høiland-Jørgensen wrote:
>>> [...]
>>>
>>> v2:
>>> - Remove now-unused srchost and dsthost local variables in cake_dequeue()
>>
>> Small nit: the changelog should come after the '---' separator. No need
>> to repost just for this.
> 
> Oh, I was under the impression that we wanted them preserved in the git
> log (and hence above the ---). Is that not the case (anymore?)?

It was, some time ago; it has been this way for a while now:

https://elixir.bootlin.com/linux/v6.13-rc3/source/Documentation/process/maintainer-netdev.rst#L229

[...]
>> dithering is now applied on both enqueue and dequeue, while prior to
>> this patch it only happened on dequeue. Is that intentional? can't lead
>> to (small) flow_deficit increase?
> 
> Yeah, that was deliberate. The flow quantum is only set on enqueue when
> the flow is first initialised as a sparse flow, not for every packet.
> The only user-visible effect I can see this having is that the maximum
> packet size that can be sent while a flow stays sparse will now vary
> with +/- one byte in some cases. I am pretty sure this won't have any
> consequence in practice, and I don't think it's worth complicating the
>> code (with a 'dither' argument to cake_get_flow_quantum(), say) to
> preserve the old behaviour.

Understood, and fine by me.

> I guess I should have mentioned in the commit message that this was
> deliberate. Since it seems you'll be editing that anyway (cf the above),
> how about adding a paragraph like:
> 
>  [...]

It's in Jakub's hands now, possibly he could prefer a repost to reduce
the maintainer's overhead.

Thanks,

Paolo



* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-09 15:06     ` Paolo Abeni
@ 2025-01-09 16:08       ` Toke Høiland-Jørgensen
  2025-01-09 16:18         ` Jakub Kicinski
  0 siblings, 1 reply; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2025-01-09 16:08 UTC (permalink / raw)
  To: Paolo Abeni, Jamal Hadi Salim, Cong Wang, Jiri Pirko
  Cc: syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Simon Horman, cake, netdev

Paolo Abeni <pabeni@redhat.com> writes:

> On 1/9/25 1:47 PM, Toke Høiland-Jørgensen wrote:
>> Paolo Abeni <pabeni@redhat.com> writes:
>>> On 1/7/25 1:01 PM, Toke Høiland-Jørgensen wrote:
>>>> [...]
>>>>
>>>> v2:
>>>> - Remove now-unused srchost and dsthost local variables in cake_dequeue()
>>>
>>> Small nit: the changelog should come after the '---' separator. No need
>>> to repost just for this.
>> 
>> Oh, I was under the impression that we wanted them preserved in the git
>> log (and hence above the ---). Is that not the case (anymore?)?
>
> It was, some time ago; it has been this way for a while now:
>
> https://elixir.bootlin.com/linux/v6.13-rc3/source/Documentation/process/maintainer-netdev.rst#L229

Huh, whaddyaknow. Thanks for the pointer.

> [...]
>>> dithering is now applied on both enqueue and dequeue, while prior to
>>> this patch it only happened on dequeue. Is that intentional? can't lead
>>> to (small) flow_deficit increase?
>> 
>> Yeah, that was deliberate. The flow quantum is only set on enqueue when
>> the flow is first initialised as a sparse flow, not for every packet.
>> The only user-visible effect I can see this having is that the maximum
>> packet size that can be sent while a flow stays sparse will now vary
>> with +/- one byte in some cases. I am pretty sure this won't have any
>> consequence in practice, and I don't think it's worth complicating the
>> code (with a 'dither' argument to cake_get_flow_quantum(), say) to
>> preserve the old behaviour.
>
> Understood, and fine by me.
>
>> I guess I should have mentioned in the commit message that this was
>> deliberate. Since it seems you'll be editing that anyway (cf the above),
>> how about adding a paragraph like:
>> 
>>  [...]
>
> It's in Jakub's hands now, possibly he could prefer a repost to reduce
> the maintainer's overhead.

Alright, sure, I'll respin :)

-Toke


* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-09 16:08       ` Toke Høiland-Jørgensen
@ 2025-01-09 16:18         ` Jakub Kicinski
  2025-01-09 16:35           ` Toke Høiland-Jørgensen
  0 siblings, 1 reply; 9+ messages in thread
From: Jakub Kicinski @ 2025-01-09 16:18 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Paolo Abeni, Jamal Hadi Salim, Cong Wang, Jiri Pirko,
	syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Simon Horman, cake, netdev

On Thu, 09 Jan 2025 17:08:14 +0100 Toke Høiland-Jørgensen wrote:
> >> [...]
> >
> > It's in Jakub's hands now, possibly he could prefer a repost to reduce
> > the maintainer's overhead.  
> 
> Alright, sure, I'll respin :)

Hold on, I'll do it :)


* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-07 12:01 [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts Toke Høiland-Jørgensen
  2025-01-08 16:10 ` Dave Taht
  2025-01-09 12:11 ` Paolo Abeni
@ 2025-01-09 16:30 ` patchwork-bot+netdevbpf
  2 siblings, 0 replies; 9+ messages in thread
From: patchwork-bot+netdevbpf @ 2025-01-09 16:30 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen <toke@redhat.com>
  Cc: toke, jhs, xiyou.wangcong, jiri, pabeni,
	syzbot+f63600d288bfb7057424, davem, edumazet, kuba, horms, cake,
	netdev

Hello:

This patch was applied to netdev/net.git (main)
by Jakub Kicinski <kuba@kernel.org>:

On Tue,  7 Jan 2025 13:01:05 +0100 you wrote:
> Even though we fixed a logic error in the commit cited below, syzbot
> still managed to trigger an underflow of the per-host bulk flow
> counters, leading to an out-of-bounds memory access (the underflowed
> u16 counter wraps to 65535 and is then used as an index into the
> quantum_div[] array, which only has CAKE_QUEUES + 1 entries).
> 
> To prevent any such logic errors from causing out-of-bounds memory
> accesses, this commit factors out all accesses to the per-host bulk
> flow counters into a series of helpers that perform bounds checking
> before any increment or decrement. This also improves readability: the
> conditional checks for the flow mode now live in these helpers instead
> of being spread throughout the code (which was the cause of the
> original logic error).
> 
> [...]

Here is the summary with links:
  - [net,v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
    https://git.kernel.org/netdev/net/c/737d4d91d35b

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html




* Re: [Cake] [PATCH net v2] sched: sch_cake: add bounds checks to host bulk flow fairness counts
  2025-01-09 16:18         ` Jakub Kicinski
@ 2025-01-09 16:35           ` Toke Høiland-Jørgensen
  0 siblings, 0 replies; 9+ messages in thread
From: Toke Høiland-Jørgensen @ 2025-01-09 16:35 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Paolo Abeni, Jamal Hadi Salim, Cong Wang, Jiri Pirko,
	syzbot+f63600d288bfb7057424, David S. Miller, Eric Dumazet,
	Simon Horman, cake, netdev

Jakub Kicinski <kuba@kernel.org> writes:

> On Thu, 09 Jan 2025 17:08:14 +0100 Toke Høiland-Jørgensen wrote:
>> >> [...]
>> >
>> > It's in Jakub's hands now, possibly he could prefer a repost to reduce
>> > the maintainer's overhead.  
>> 
>> Alright, sure, I'll respin :)
>
> Hold on, I'll do it :)

Crossed streams, but thanks! :)

-Toke
