[Cake] dual-src/dsthost unfairness, only with bi-directional traffic

Georgios Amanakis gamanakis at gmail.com
Tue Jan 15 17:42:41 EST 2019


The patch I previously sent had the host_load manipulated only when
dual-dsthost is set, since that was what I was primarily testing. For
dual-srchost to behave the same way, line 2107 has to be changed, too. Will
resubmit in case anybody wants to test, later today.

On Tue, Jan 15, 2019, 2:22 PM George Amanakis <gamanakis at gmail.com> wrote:

>
> I think what is happening here is that if a client has flows such as "a
> (bulk upload)" and "b (bulk download)", the incoming ACKs of flow "a"
> compete with the incoming bulk traffic on flow "b". With compete I mean
> in terms of flow selection.
>
> So if we adjust the host_load to be the same as the bulk_flow_count of
> *each* host, the problem seems to be resolved.
> I drafted a patch below.
>
> Pete's setup, tested with the patch (ingress in mbit/s):
> IP1: 8down  49.18mbit/s
> IP1: 1up    46.73mbit/s
> IP2: 1down  47.39mbit/s
> IP2: 8up    49.21mbit/s
>
>
> ---
>  sch_cake.c | 34 ++++++++++++++++++++++++++++------
>  1 file changed, 28 insertions(+), 6 deletions(-)
>
> diff --git a/sch_cake.c b/sch_cake.c
> index d434ae0..5c0f0e1 100644
> --- a/sch_cake.c
> +++ b/sch_cake.c
> @@ -148,6 +148,7 @@ struct cake_host {
>         u32 dsthost_tag;
>         u16 srchost_refcnt;
>         u16 dsthost_refcnt;
> +       u16 bulk_flow_count;
>  };
>
>  struct cake_heap_entry {
> @@ -1897,10 +1898,10 @@ static s32 cake_enqueue(struct sk_buff *skb,
> struct Qdisc *sch,
>                 q->last_packet_time = now;
>         }
>
> +       struct cake_host *srchost = &b->hosts[flow->srchost];
> +       struct cake_host *dsthost = &b->hosts[flow->dsthost];
>         /* flowchain */
>         if (!flow->set || flow->set == CAKE_SET_DECAYING) {
> -               struct cake_host *srchost = &b->hosts[flow->srchost];
> -               struct cake_host *dsthost = &b->hosts[flow->dsthost];
>                 u16 host_load = 1;
>
>                 if (!flow->set) {
> @@ -1927,6 +1928,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct
> Qdisc *sch,
>                 flow->set = CAKE_SET_BULK;
>                 b->sparse_flow_count--;
>                 b->bulk_flow_count++;
> +               if (cake_dsrc(q->flow_mode))
> +                       srchost->bulk_flow_count++;
> +
> +               if (cake_ddst(q->flow_mode))
> +                       dsthost->bulk_flow_count++;
>         }
>
>         if (q->buffer_used > q->buffer_max_used)
> @@ -2101,7 +2107,7 @@ retry:
>                 host_load = max(host_load, srchost->srchost_refcnt);
>
>         if (cake_ddst(q->flow_mode))
> -               host_load = max(host_load, dsthost->dsthost_refcnt);
> +               host_load = max(host_load, dsthost->bulk_flow_count);
>
>         WARN_ON(host_load > CAKE_QUEUES);
>
> @@ -2110,8 +2116,6 @@ retry:
>                 /* The shifted prandom_u32() is a way to apply dithering to
>                  * avoid accumulating roundoff errors
>                  */
> -               flow->deficit += (b->flow_quantum * quantum_div[host_load]
> +
> -                                 (prandom_u32() >> 16)) >> 16;
>                 list_move_tail(&flow->flowchain, &b->old_flows);
>
>                 /* Keep all flows with deficits out of the sparse and
> decaying
> @@ -2122,6 +2126,11 @@ retry:
>                         if (flow->head) {
>                                 b->sparse_flow_count--;
>                                 b->bulk_flow_count++;
> +                               if (cake_dsrc(q->flow_mode))
> +                                       srchost->bulk_flow_count++;
> +
> +                               if (cake_ddst(q->flow_mode))
> +                                       dsthost->bulk_flow_count++;
>                                 flow->set = CAKE_SET_BULK;
>                         } else {
>                                 /* we've moved it to the bulk rotation for
> @@ -2131,6 +2140,8 @@ retry:
>                                 flow->set = CAKE_SET_SPARSE_WAIT;
>                         }
>                 }
> +               flow->deficit += (b->flow_quantum * quantum_div[host_load]
> +
> +                                 (prandom_u32() >> 16)) >> 16;
>                 goto retry;
>         }
>
> @@ -2151,6 +2162,11 @@ retry:
>                                                &b->decaying_flows);
>                                 if (flow->set == CAKE_SET_BULK) {
>                                         b->bulk_flow_count--;
> +                                       if (cake_dsrc(q->flow_mode))
> +                                               srchost->bulk_flow_count--;
> +
> +                                       if (cake_ddst(q->flow_mode))
> +                                               dsthost->bulk_flow_count--;
>                                         b->decaying_flow_count++;
>                                 } else if (flow->set == CAKE_SET_SPARSE ||
>                                            flow->set ==
> CAKE_SET_SPARSE_WAIT) {
> @@ -2164,8 +2180,14 @@ retry:
>                                 if (flow->set == CAKE_SET_SPARSE ||
>                                     flow->set == CAKE_SET_SPARSE_WAIT)
>                                         b->sparse_flow_count--;
> -                               else if (flow->set == CAKE_SET_BULK)
> +                               else if (flow->set == CAKE_SET_BULK) {
>                                         b->bulk_flow_count--;
> +                                       if (cake_dsrc(q->flow_mode))
> +                                               srchost->bulk_flow_count--;
> +
> +                                       if (cake_ddst(q->flow_mode))
> +                                               dsthost->bulk_flow_count--;
> +                               }
>                                 else
>                                         b->decaying_flow_count--;
>
> --
> 2.20.1
>
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.bufferbloat.net/pipermail/cake/attachments/20190115/160dc8c9/attachment.html>


More information about the Cake mailing list