From: Toke Høiland-Jørgensen <toke@toke.dk>
To: netdev@vger.kernel.org
Cc: cake@lists.bufferbloat.net
Date: Mon, 14 May 2018 21:00:29 +0200
Message-ID: <152632442982.4861.6429347611148029227.stgit@alrua-kau>
In-Reply-To: <152632431302.4861.16657365789045735410.stgit@alrua-kau>
References: <152632431302.4861.16657365789045735410.stgit@alrua-kau>
Subject: [Cake] [PATCH net-next v10 4/7] sch_cake: Add NAT awareness to packet classifier

When CAKE is deployed on a gateway that also performs NAT (which is a
common deployment mode), the host fairness mechanism cannot distinguish
internal hosts from each other, and so fails to work correctly.

To fix this, we add an optional NAT awareness mode, which will query
the kernel conntrack mechanism to obtain the pre-NAT addresses for each
packet and use that in the flow and host hashing.

When the shaper is enabled and the host is already performing NAT, the
cost of this lookup is negligible. However, in unlimited mode with no
NAT being performed, there is a significant CPU cost at higher
bandwidths. For this reason, the feature is turned off by default.
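As a usage sketch (assuming the matching iproute2 support, which
exposes this netlink attribute as the 'nat'/'nonat' tc-cake keywords;
the device name and bandwidth below are illustrative only):

  # enable NAT-aware flow/host hashing on a shaped uplink
  tc qdisc replace dev eth0 root cake bandwidth 20Mbit nat

  # back to the default: no conntrack lookup per packet
  tc qdisc replace dev eth0 root cake bandwidth 20Mbit nonat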
Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---
 net/sched/sch_cake.c |   72 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 4bc178c09f3a..2802bb2ace84 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -71,6 +71,12 @@
 #include <net/tcp.h>
 #include <net/flow_dissector.h>
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
 #define CAKE_SET_WAYS (8)
 #define CAKE_MAX_TINS (8)
 #define CAKE_QUEUES (1024)
@@ -522,6 +528,60 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
 	return drop;
 }
 
+#if IS_REACHABLE(CONFIG_NF_CONNTRACK)
+
+static void cake_update_flowkeys(struct flow_keys *keys,
+				 const struct sk_buff *skb)
+{
+	const struct nf_conntrack_tuple *tuple;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	bool rev = false;
+
+	if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+		return;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct) {
+		tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+	} else {
+		const struct nf_conntrack_tuple_hash *hash;
+		struct nf_conntrack_tuple srctuple;
+
+		if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+				       NFPROTO_IPV4, dev_net(skb->dev),
+				       &srctuple))
+			return;
+
+		hash = nf_conntrack_find_get(dev_net(skb->dev),
+					     &nf_ct_zone_dflt,
+					     &srctuple);
+		if (!hash)
+			return;
+
+		rev = true;
+		ct = nf_ct_tuplehash_to_ctrack(hash);
+		tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+	}
+
+	keys->addrs.v4addrs.src = rev ? tuple->dst.u3.ip : tuple->src.u3.ip;
+	keys->addrs.v4addrs.dst = rev ? tuple->src.u3.ip : tuple->dst.u3.ip;
+
+	if (keys->ports.ports) {
+		keys->ports.src = rev ? tuple->dst.u.all : tuple->src.u.all;
+		keys->ports.dst = rev ? tuple->src.u.all : tuple->dst.u.all;
+	}
+	if (rev)
+		nf_ct_put(ct);
+}
+#else
+static void cake_update_flowkeys(struct flow_keys *keys,
+				 const struct sk_buff *skb)
+{
+	/* There is nothing we can do here without CONNTRACK */
+}
+#endif
+
 /* Cake has several subtle multiple bit settings. In these cases you
  * would be matching triple isolate mode as well.
  */
@@ -549,6 +609,9 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 	skb_flow_dissect_flow_keys(skb, &keys,
 				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
+	if (flow_mode & CAKE_FLOW_NAT_FLAG)
+		cake_update_flowkeys(&keys, skb);
+
 	/* flow_hash_from_keys() sorts the addresses by value, so we have
 	 * to preserve their order in a separate data structure to treat
 	 * src and dst host addresses as independently selectable.
@@ -1716,6 +1779,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 		q->flow_mode = (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
 				CAKE_FLOW_MASK);
 
+	if (tb[TCA_CAKE_NAT]) {
+		q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+		q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+			!!nla_get_u32(tb[TCA_CAKE_NAT]);
+	}
+
 	if (tb[TCA_CAKE_RTT]) {
 		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
 
@@ -1880,6 +1949,9 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, TCA_CAKE_NAT, !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure: