The patch I previously sent manipulated host_load only when dual-dsthost is set, since that was what I was primarily testing. For dual-srchost to behave the same way, line 2107 has to be changed as well (a sketch of that change follows after the quoted patch below). I will resubmit later today in case anybody wants to test.

On Tue, Jan 15, 2019, 2:22 PM George Amanakis <gamanakis@gmail.com> wrote:
I think what is happening here is that if a client has flows such as "a
(bulk upload)" and "b (bulk download)", the incoming ACKs of flow "a"
compete with the incoming bulk traffic of flow "b". By "compete" I mean
in the sense of flow selection.

So if we adjust host_load to match the bulk_flow_count of *each* host,
the problem seems to be resolved. I drafted a patch below.
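
For context, host_load is the divisor applied to the per-flow quantum when a
bulk flow's deficit is refilled (the flow->deficit line visible in the diff
below), so taking it from the per-host bulk_flow_count divides a host's share
among its bulk flows rather than among every flow holding a refcount on it.
Below is a minimal user-space sketch of that relationship; it assumes that
quantum_div[] is the 16.16 reciprocal table (65535 / i) built at init in
mainline sch_cake, and it leaves out the prandom_u32() dithering.

/* Toy model of the quantum scaling, not kernel code. */
#include <stdio.h>

#define CAKE_QUEUES 1024

static unsigned int quantum_div[CAKE_QUEUES + 1];

int main(void)
{
	unsigned int flow_quantum = 1514;	/* MTU-sized quantum */
	unsigned int host_load;
	unsigned int i;

	quantum_div[0] = ~0U;
	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;

	/* With host_load taken from bulk_flow_count, a host running eight
	 * bulk flows refills each of them with ~1/8 of the quantum, while
	 * a host whose only active flow is a thin ACK stream keeps the
	 * full quantum (host_load == 1).
	 */
	for (host_load = 1; host_load <= 8; host_load++)
		printf("host_load %u -> per-flow quantum ~%u bytes\n",
		       host_load,
		       (flow_quantum * quantum_div[host_load]) >> 16);

	return 0;
}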

Pete's setup, tested with the patch (ingress rates in Mbit/s):
IP1: 8 down  49.18 Mbit/s
IP1: 1 up    46.73 Mbit/s
IP2: 1 down  47.39 Mbit/s
IP2: 8 up    49.21 Mbit/s

---
 sch_cake.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/sch_cake.c b/sch_cake.c
index d434ae0..5c0f0e1 100644
--- a/sch_cake.c
+++ b/sch_cake.c
@@ -148,6 +148,7 @@ struct cake_host {
 	u32 dsthost_tag;
 	u16 srchost_refcnt;
 	u16 dsthost_refcnt;
+	u16 bulk_flow_count;
 };
 
 struct cake_heap_entry {
@@ -1897,10 +1898,10 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		q->last_packet_time = now;
 	}
 
+	struct cake_host *srchost = &b->hosts[flow->srchost];
+	struct cake_host *dsthost = &b->hosts[flow->dsthost];
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
 		u16 host_load = 1;
 
 		if (!flow->set) {
@@ -1927,6 +1928,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
+		if (cake_dsrc(q->flow_mode))
+			srchost->bulk_flow_count++;
+
+		if (cake_ddst(q->flow_mode))
+			dsthost->bulk_flow_count++;
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -2101,7 +2107,7 @@ retry:
 		host_load = max(host_load, srchost->srchost_refcnt);
 
 	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_refcnt);
+		host_load = max(host_load, dsthost->bulk_flow_count);
 
 	WARN_ON(host_load > CAKE_QUEUES);
 
@@ -2110,8 +2116,6 @@ retry:
 		/* The shifted prandom_u32() is a way to apply dithering to
 		 * avoid accumulating roundoff errors
 		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2122,6 +2126,11 @@ retry:
 			if (flow->head) {
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
+				if (cake_dsrc(q->flow_mode))
+					srchost->bulk_flow_count++;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->bulk_flow_count++;
 				flow->set = CAKE_SET_BULK;
 			} else {
 				/* we've moved it to the bulk rotation for
@@ -2131,6 +2140,8 @@ retry:
 				flow->set = CAKE_SET_SPARSE_WAIT;
 			}
 		}
+		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+				  (prandom_u32() >> 16)) >> 16;
 		goto retry;
 	}
 
@@ -2151,6 +2162,11 @@ retry:
 				       &b->decaying_flows);
 			if (flow->set == CAKE_SET_BULK) {
 				b->bulk_flow_count--;
+				if (cake_dsrc(q->flow_mode))
+					srchost->bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->bulk_flow_count--;
 				b->decaying_flow_count++;
 			} else if (flow->set == CAKE_SET_SPARSE ||
 				   flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2164,8 +2180,14 @@ retry:
 			if (flow->set == CAKE_SET_SPARSE ||
 			    flow->set == CAKE_SET_SPARSE_WAIT)
 				b->sparse_flow_count--;
-			else if (flow->set == CAKE_SET_BULK)
+			else if (flow->set == CAKE_SET_BULK) {
 				b->bulk_flow_count--;
+				if (cake_dsrc(q->flow_mode))
+					srchost->bulk_flow_count--;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->bulk_flow_count--;
+			}
 			else
 				b->decaying_flow_count--;
 
-- 
2.20.1
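
As noted at the top of this message, for dual-srchost the srchost line in the
same region (line 2107) would presumably get the equivalent change. A rough
sketch of that follow-up, not part of the patch as posted:

	/* Hypothetical dual-srchost counterpart, mirroring the dsthost
	 * change in the @@ -2101,7 +2107,7 @@ hunk above: take host_load
	 * from the per-host bulk flow count on the source side as well.
	 */
	if (cake_dsrc(q->flow_mode))
		host_load = max(host_load, srchost->bulk_flow_count);

	if (cake_ddst(q->flow_mode))
		host_load = max(host_load, dsthost->bulk_flow_count);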