[Cake] total download rate with many flows

George Amanakis g_amanakis at yahoo.com
Mon Nov 13 20:53:57 EST 2017


I meant proportionally to (1-1/sqrt(x)).
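
To illustrate, roughly what that factor works out to at a few flow
counts (a standalone C sketch of the arithmetic only, nothing
cake-specific):

=============8<=============
#include <stdio.h>
#include <math.h>

/* Fraction of a dropped packet's transmit time charged to the shaper:
 * zero for a single flow (drops cost nothing, as in egress mode) and
 * approaching one as flows multiply (as in stock ingress mode, which
 * charges drops in full). */
static double drop_factor(unsigned x)
{
        return 1.0 - 1.0 / sqrt(x);
}

int main(void)
{
        unsigned x;

        /* prints 0.0%, 29.3%, 50.0%, 64.6%, 75.0%, 82.3% */
        for (x = 1; x <= 32; x *= 2)
                printf("flows=%2u  charge=%4.1f%%\n", x,
                       100.0 * drop_factor(x));
        return 0;
}
=============8<=============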


On 11/13/2017 8:51 PM, George Amanakis wrote:
> I am exploring this idea further. If q->time_next_packet is
> incremented for dropped packets proportionally to (1-1/x), where x is
> the count of all flows in the tin being served, ingress mode works
> much more smoothly: latency stays below 50ms and throughput comes
> very close to the set limit.
>
> I *tried* to make a patch against the latest cobalt code.
>
> =============8<=============
> diff --git a/sch_cake.c b/sch_cake.c
> index 82f264f..752783a 100644
> --- a/sch_cake.c
> +++ b/sch_cake.c
> @@ -145,6 +145,7 @@ struct cake_flow {
>         struct list_head  flowchain;
>         s32               deficit;
>         struct cobalt_vars cvars;
> +       struct cobalt_vars cvars2;
>         u16               srchost; /* index into cake_host table */
>         u16               dsthost;
>         u8                set;
> @@ -254,6 +255,7 @@ struct cake_sched_data {
>         u32             avg_window_bytes;
>         u32             avg_peak_bandwidth;
>         u64             last_reconfig_time;
> +       u32             drop_len;
>  };
>
>  enum {
> @@ -820,7 +822,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
>         sch->qstats.drops++;
>
>         if(q->rate_flags & CAKE_FLAG_INGRESS)
> -               cake_advance_shaper(q, b, cake_overhead(q, len), now);
> +               q->drop_len += len;
>
>  #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
>         kfree_skb(skb);
> @@ -1274,7 +1276,9 @@ retry:
>                 /* drop this packet, get another one */
>                 if(q->rate_flags & CAKE_FLAG_INGRESS) {
>                         len = cake_overhead(q, qdisc_pkt_len(skb));
> -                       cake_advance_shaper(q, b, len, now);
> +                       flow->cvars2.count = b->bulk_flow_count+b->sparse_flow_count+b->decaying_flow_count+b->unresponsive_flow_count;
> +                       cobalt_invsqrt(&(flow->cvars2));
> +                       q->drop_len += (len - reciprocal_scale(len, flow->cvars2.rec_inv_sqrt));
>                         flow->deficit -= len;
>                         b->tin_deficit -= len;
>                 }
> @@ -1286,8 +1290,6 @@ retry:
>                 qdisc_qstats_drop(sch);
>                 kfree_skb(skb);
>  #endif
> -               if(q->rate_flags & CAKE_FLAG_INGRESS)
> -                       goto retry;
>         }
>
>         b->tin_ecn_mark += !!flow->cvars.ecn_marked;
> @@ -1340,7 +1342,7 @@ static void cake_advance_shaper(struct cake_sched_data *q, struct cake_tin_data
>         if(q->rate_ns) {
>                 s64 tdiff1 = b->tin_time_next_packet - now;
>                 s64 tdiff2 = (len * (u64)b->tin_rate_ns) >> b->tin_rate_shft;
> -               s64 tdiff3 = (len * (u64)q->rate_ns) >> q->rate_shft;
> +               s64 tdiff3 = ((q->drop_len + len) * (u64)q->rate_ns) >> q->rate_shft;
>
>                 if(tdiff1 < 0)
>                         b->tin_time_next_packet += tdiff2;
> @@ -1348,6 +1350,7 @@ static void cake_advance_shaper(struct cake_sched_data *q, struct cake_tin_data
>                         b->tin_time_next_packet = now + tdiff2;
>
>                 q->time_next_packet += tdiff3;
> +               q->drop_len = 0;
>         }
>  }
>
> @@ -1711,6 +1714,7 @@ static void cake_reconfigure(struct Qdisc *sch)
>  {
>         struct cake_sched_data *q = qdisc_priv(sch);
>         int c, ft;
> +       q->drop_len = 0;
>
>         switch (q->tin_mode) {
>         case CAKE_MODE_BESTEFFORT:
> @@ -1941,6 +1945,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt)
>
>                         INIT_LIST_HEAD(&flow->flowchain);
>                         cobalt_vars_init(&flow->cvars);
> +                       cobalt_vars_init(&flow->cvars2);
>
>                         q->overflow_heap[k].t = i;
>                         q->overflow_heap[k].b = j;
>
> =============8<=============
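>
> To make the fixed-point part concrete: cobalt_invsqrt() leaves
> roughly 2^32/sqrt(count) in rec_inv_sqrt, and the kernel's
> reciprocal_scale() computes (val * ep_ro) >> 32, so the two new lines
> accumulate len * (1 - 1/sqrt(count)) into q->drop_len, which
> cake_advance_shaper() then folds into the charge for the next
> delivered packet and resets. A userspace model of that arithmetic (my
> own sketch, not the kernel code):
>
> =============8<=============
> #include <stdio.h>
> #include <stdint.h>
> #include <math.h>
>
> /* Mirror of the kernel helper: (val * ep_ro) >> 32. */
> static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
> {
>         return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
> }
>
> int main(void)
> {
>         uint32_t len = 1514;            /* a full Ethernet frame */
>         unsigned count = 32;            /* flows in the tin */
>
>         /* stand-in for cobalt_invsqrt(): ~2^32/sqrt(count) */
>         uint32_t rec_inv_sqrt = (uint32_t)(4294967296.0 / sqrt(count));
>
>         /* the patched line: charge all but len/sqrt(count) bytes */
>         uint32_t charged = len - reciprocal_scale(len, rec_inv_sqrt);
>
>         /* prints 1247 of 1514: with 32 flows a drop is charged ~82% */
>         printf("drop_len += %u of %u bytes\n", charged, len);
>         return 0;
> }
> =============8<=============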
>
>
>
> On 11/11/2017 10:48 PM, George Amanakis wrote:
>> I totally understand what you are saying. However, I believe cake's
>> egress and ingress modes currently behave as two extremes, and one
>> could argue that neither of them is the golden mean. With a patch in
>> ingress mode (see below) and a single host downloading over 32
>> flows, I managed to increase throughput from ~7Mbps to ~10Mbps
>> (configured limit 12200kbps) while latency increased from ~10ms to
>> ~50ms, which would still be acceptable. For comparison, egress mode
>> in the same setup gives me ~11.5Mbps throughput and ~500ms latency.
>>
>> I would like to hear your thoughts about this idea: the patch
>> increments q->time_next_packet differently for dropped packets than
>> for forwarded ones. Please focus on the idea, not the actual
>> implementation :) (also pasted in https://pastebin.com/SZ14WiYw)
>>
>> =============8<=============
>>
>> diff --git a/sch_cake.c b/sch_cake.c
>> index 82f264f..a3a4a88 100644
>> --- a/sch_cake.c
>> +++ b/sch_cake.c
>> @@ -769,6 +769,7 @@ static void cake_heapify_up(struct cake_sched_data *q, u16 i)
>>  }
>>
>>  static void cake_advance_shaper(struct cake_sched_data *q, struct cake_tin_data *b, u32 len, u64 now);
>> +static void cake_advance_shaper2(struct cake_sched_data *q, struct cake_tin_data *b, u32 len, u64 now);
>>
>>  #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
>>  static unsigned int cake_drop(struct Qdisc *sch)
>> @@ -1274,7 +1275,7 @@ retry:
>>                 /* drop this packet, get another one */
>>                 if(q->rate_flags & CAKE_FLAG_INGRESS) {
>>                         len = cake_overhead(q, qdisc_pkt_len(skb));
>> -                       cake_advance_shaper(q, b, len, now);
>> +                       cake_advance_shaper2(q, b, len, now);
>>                         flow->deficit -= len;
>>                         b->tin_deficit -= len;
>>                 }
>> @@ -1286,8 +1287,6 @@ retry:
>>                 qdisc_qstats_drop(sch);
>>                 kfree_skb(skb);
>>  #endif
>> -               if(q->rate_flags & CAKE_FLAG_INGRESS)
>> -                       goto retry;
>>         }
>>
>>         b->tin_ecn_mark += !!flow->cvars.ecn_marked;
>> @@ -1351,6 +1350,24 @@ static void cake_advance_shaper(struct cake_sched_data *q, struct cake_tin_data
>>         }
>>  }
>>
>> +static void cake_advance_shaper2(struct cake_sched_data *q, struct cake_tin_data *b, u32 len, u64 now)
>> +{
>> +       /* charge packet bandwidth to this tin, lower tins,
>> +        * and to the global shaper.
>> +        */
>> +       if(q->rate_ns) {
>> +               s64 tdiff1 = b->tin_time_next_packet - now;
>> +               s64 tdiff2 = (len * (u64)b->tin_rate_ns) >> b->tin_rate_shft;
>> +               s64 tdiff3 = (len * (u64)q->rate_ns) >> q->rate_shft;
>> +
>> +               if(tdiff1 < 0)
>> +                       b->tin_time_next_packet += tdiff2;
>> +               else if(tdiff1 < tdiff2)
>> +                       b->tin_time_next_packet = now + tdiff2;
>> +
>> +               q->time_next_packet += (tdiff3*27)>>5;
>> +       }
>> +}
>>  static void cake_reset(struct Qdisc *sch)
>>  {
>>         u32 c;
>>
>> =============8<=============
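>>
>> The only functional difference from cake_advance_shaper() is the
>> last line: the global shaper is charged (tdiff3*27)>>5, i.e. 27/32
>> or about 84% of the dropped packet's transmit time, instead of the
>> full tdiff3 (stock ingress mode) or nothing at all (egress mode). A
>> tiny check of that shift arithmetic:
>>
>> =============8<=============
>> #include <stdio.h>
>>
>> int main(void)
>> {
>>         long long tdiff3 = 1000000;     /* say, 1ms of transmit time */
>>
>>         /* prints 843750 of 1000000, i.e. 27/32 = 84.375% */
>>         printf("%lld ns charged of %lld ns\n",
>>                (tdiff3 * 27) >> 5, tdiff3);
>>         return 0;
>> }
>> =============8<=============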
>>
>> On 11/10/2017 4:50 PM, Jonathan Morton wrote:
>>>
>>> In fact, that's why I put a failsafe into ingress mode, so that it 
>>> would never stall completely.  It can happen, however, that 
>>> throughput is significantly reduced when the drop rate is high.
>>>
>>> If throughput is more important to you than induced latency, switch 
>>> to egress mode.
>>>
>>> Unfortunately it's not possible to guarantee both low latency and 
>>> high throughput when operating downstream of the bottleneck link.  
>>> ECN gives you better results, though.
>>>
>>> - Jonathan Morton
>>>
>>
>


