From: "Toke Høiland-Jørgensen" <toke@redhat.com>
To: "Toke Høiland-Jørgensen" <toke@toke.dk>,
"Jamal Hadi Salim" <jhs@mojatatu.com>,
"Cong Wang" <xiyou.wangcong@gmail.com>,
"Jiri Pirko" <jiri@resnulli.us>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Simon Horman" <horms@kernel.org>
Cc: "Jonas Köppeler" <j.koeppeler@tu-berlin.de>,
cake@lists.bufferbloat.net, netdev@vger.kernel.org,
"Toke Høiland-Jørgensen" <toke@redhat.com>
Subject: [Cake] [PATCH RFC net-next 1/4] net/sched: sch_cake: Factor out config variables into separate struct
Date: Wed, 24 Sep 2025 14:16:03 +0200
Message-ID: <20250924-mq-cake-sub-qdisc-v1-1-43a060d1112a@redhat.com>
In-Reply-To: <20250924-mq-cake-sub-qdisc-v1-0-43a060d1112a@redhat.com>
Factor out all the user-configurable variables into a separate struct,
cake_sched_config, which is allocated separately and referenced by
pointer from struct cake_sched_data. This is done in preparation for
sharing the configuration across multiple instances of cake in an mq
setup.
No functional change is intended with this patch.
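In outline, the change is the following split (a sketch with only two
representative fields; the real structs carry many more members, as the
diff below shows; u64/u32 are the kernel types from <linux/types.h>):

    /* before: user config and runtime state mixed in one private struct */
    struct cake_sched_data {
            u64 rate_bps;           /* user-configurable */
            u32 buffer_used;        /* runtime state */
    };

    /* after: config lives in its own separately-allocated struct,
     * referenced by pointer, so that a later patch can point several
     * cake instances at one shared config
     */
    struct cake_sched_config {
            u64 rate_bps;
    };

    struct cake_sched_data {
            struct cake_sched_config *config; /* kvcalloc'd in cake_init() */
            u32 buffer_used;
    };

The pointer indirection (rather than keeping the struct embedded by
value) is what makes the later sharing possible without copying
configuration between instances.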
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
net/sched/sch_cake.c | 245 ++++++++++++++++++++++++++++-----------------------
1 file changed, 133 insertions(+), 112 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 32bacfc314c260dccf94178d309ccb2be22d69e4..a02f3cfcb09b50bda6ee66dfc8a8df584ae6a365 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -197,40 +197,42 @@ struct cake_tin_data {
u32 way_collisions;
}; /* number of tins is small, so size of this struct doesn't matter much */
+struct cake_sched_config {
+ u64 rate_bps;
+ u64 interval;
+ u64 target;
+ u32 buffer_config_limit;
+ u32 fwmark_mask;
+ u16 fwmark_shft;
+ s16 rate_overhead;
+ u16 rate_mpu;
+ u16 rate_flags;
+ u8 tin_mode;
+ u8 flow_mode;
+ u8 atm_mode;
+ u8 ack_filter;
+};
+
struct cake_sched_data {
struct tcf_proto __rcu *filter_list; /* optional external classifier */
struct tcf_block *block;
struct cake_tin_data *tins;
+ struct cake_sched_config *config;
struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
- u16 overflow_timeout;
-
- u16 tin_cnt;
- u8 tin_mode;
- u8 flow_mode;
- u8 ack_filter;
- u8 atm_mode;
-
- u32 fwmark_mask;
- u16 fwmark_shft;
/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
- u16 rate_shft;
ktime_t time_next_packet;
ktime_t failsafe_next_packet;
u64 rate_ns;
- u64 rate_bps;
- u16 rate_flags;
- s16 rate_overhead;
- u16 rate_mpu;
- u64 interval;
- u64 target;
+ u16 rate_shft;
+ u16 overflow_timeout;
+ u16 tin_cnt;
/* resource tracking */
u32 buffer_used;
u32 buffer_max_used;
u32 buffer_limit;
- u32 buffer_config_limit;
/* indices for dequeue */
u16 cur_tin;
@@ -1198,7 +1200,7 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
struct cake_flow *flow)
{
- bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
+ bool aggressive = q->config->ack_filter == CAKE_ACK_AGGRESSIVE;
struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
struct sk_buff *skb_check, *skb_prev = NULL;
const struct ipv6hdr *ipv6h, *ipv6h_check;
@@ -1358,15 +1360,17 @@ static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
return avg;
}
-static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
{
+ struct cake_sched_config *q = qd->config;
+
if (q->rate_flags & CAKE_FLAG_OVERHEAD)
len -= off;
- if (q->max_netlen < len)
- q->max_netlen = len;
- if (q->min_netlen > len)
- q->min_netlen = len;
+ if (qd->max_netlen < len)
+ qd->max_netlen = len;
+ if (qd->min_netlen > len)
+ qd->min_netlen = len;
len += q->rate_overhead;
@@ -1385,10 +1389,10 @@ static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
len += (len + 63) / 64;
}
- if (q->max_adjlen < len)
- q->max_adjlen = len;
- if (q->min_adjlen > len)
- q->min_adjlen = len;
+ if (qd->max_adjlen < len)
+ qd->max_adjlen = len;
+ if (qd->min_adjlen > len)
+ qd->min_adjlen = len;
return len;
}
@@ -1592,7 +1596,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
flow->dropped++;
b->tin_dropped++;
- if (q->rate_flags & CAKE_FLAG_INGRESS)
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
@@ -1663,7 +1667,8 @@ static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
struct sk_buff *skb)
{
- struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
u32 tin, mark;
bool wash;
u8 dscp;
@@ -1680,24 +1685,24 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
- else if (mark && mark <= q->tin_cnt)
- tin = q->tin_order[mark - 1];
+ else if (mark && mark <= qd->tin_cnt)
+ tin = qd->tin_order[mark - 1];
else if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= q->tin_cnt)
- tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
+ TC_H_MIN(skb->priority) <= qd->tin_cnt)
+ tin = qd->tin_order[TC_H_MIN(skb->priority) - 1];
else {
if (!wash)
dscp = cake_handle_diffserv(skb, wash);
- tin = q->tin_index[dscp];
+ tin = qd->tin_index[dscp];
- if (unlikely(tin >= q->tin_cnt))
+ if (unlikely(tin >= qd->tin_cnt))
tin = 0;
}
- return &q->tins[tin];
+ return &qd->tins[tin];
}
static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
@@ -1753,7 +1758,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
u32 idx, tin;
/* choose flow to insert into */
- idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+ idx = cake_classify(sch, &b, skb, q->config->flow_mode, &ret);
if (idx == 0) {
if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
@@ -1788,7 +1793,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
- if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+ if (skb_is_gso(skb) && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int slen = 0, numsegs = 0;
@@ -1827,7 +1832,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
flow_queue_add(flow, skb);
- if (q->ack_filter)
+ if (q->config->ack_filter)
ack = cake_ack_filter(q, flow);
if (ack) {
@@ -1836,7 +1841,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->bytes += qdisc_pkt_len(ack);
len -= qdisc_pkt_len(ack);
q->buffer_used += skb->truesize - ack->truesize;
- if (q->rate_flags & CAKE_FLAG_INGRESS)
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
@@ -1859,7 +1864,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cake_heapify_up(q, b->overflow_idx[idx]);
/* incoming bandwidth capacity estimate */
- if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+ if (q->config->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
u64 packet_interval = \
ktime_to_ns(ktime_sub(now, q->last_packet_time));
@@ -1891,7 +1896,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (ktime_after(now,
ktime_add_ms(q->last_reconfig_time,
250))) {
- q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+ q->config->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
cake_reconfigure(sch);
}
}
@@ -1911,7 +1916,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow->set = CAKE_SET_SPARSE;
b->sparse_flow_count++;
- flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
+ flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode);
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
@@ -1920,8 +1925,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->sparse_flow_count--;
b->bulk_flow_count++;
- cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
- cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
}
if (q->buffer_used > q->buffer_max_used)
@@ -2103,8 +2108,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
b->sparse_flow_count--;
b->bulk_flow_count++;
- cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
- cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
flow->set = CAKE_SET_BULK;
} else {
@@ -2116,7 +2121,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
}
}
- flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
+ flow->deficit += cake_get_flow_quantum(b, flow, q->config->flow_mode);
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
@@ -2140,8 +2145,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
- cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2159,8 +2164,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
- cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
} else
b->decaying_flow_count--;
@@ -2171,14 +2176,14 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
reason = cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
(b->bulk_flow_count *
- !!(q->rate_flags &
+ !!(q->config->rate_flags &
CAKE_FLAG_INGRESS)));
/* Last packet in queue may be marked, shouldn't be dropped */
if (reason == SKB_NOT_DROPPED_YET || !flow->head)
break;
/* drop this packet, get another one */
- if (q->rate_flags & CAKE_FLAG_INGRESS) {
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS) {
len = cake_advance_shaper(q, b, skb,
now, true);
flow->deficit -= len;
@@ -2189,7 +2194,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
kfree_skb_reason(skb, reason);
- if (q->rate_flags & CAKE_FLAG_INGRESS)
+ if (q->config->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
@@ -2311,7 +2316,7 @@ static int cake_config_besteffort(struct Qdisc *sch)
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[0];
u32 mtu = psched_mtu(qdisc_dev(sch));
- u64 rate = q->rate_bps;
+ u64 rate = q->config->rate_bps;
q->tin_cnt = 1;
@@ -2319,7 +2324,7 @@ static int cake_config_besteffort(struct Qdisc *sch)
q->tin_order = normal_order;
cake_set_rate(b, rate, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
b->tin_quantum = 65535;
return 0;
@@ -2330,7 +2335,7 @@ static int cake_config_precedence(struct Qdisc *sch)
/* convert high-level (user visible) parameters into internal format */
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
- u64 rate = q->rate_bps;
+ u64 rate = q->config->rate_bps;
u32 quantum = 256;
u32 i;
@@ -2341,8 +2346,8 @@ static int cake_config_precedence(struct Qdisc *sch)
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[i];
- cake_set_rate(b, rate, mtu, us_to_ns(q->target),
- us_to_ns(q->interval));
+ cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
+ us_to_ns(q->config->interval));
b->tin_quantum = max_t(u16, 1U, quantum);
@@ -2419,7 +2424,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
- u64 rate = q->rate_bps;
+ u64 rate = q->config->rate_bps;
u32 quantum = 256;
u32 i;
@@ -2433,8 +2438,8 @@ static int cake_config_diffserv8(struct Qdisc *sch)
for (i = 0; i < q->tin_cnt; i++) {
struct cake_tin_data *b = &q->tins[i];
- cake_set_rate(b, rate, mtu, us_to_ns(q->target),
- us_to_ns(q->interval));
+ cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
+ us_to_ns(q->config->interval));
b->tin_quantum = max_t(u16, 1U, quantum);
@@ -2463,7 +2468,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
- u64 rate = q->rate_bps;
+ u64 rate = q->config->rate_bps;
u32 quantum = 1024;
q->tin_cnt = 4;
@@ -2474,13 +2479,13 @@ static int cake_config_diffserv4(struct Qdisc *sch)
/* class characteristics */
cake_set_rate(&q->tins[0], rate, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
cake_set_rate(&q->tins[1], rate >> 4, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
cake_set_rate(&q->tins[2], rate >> 1, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
cake_set_rate(&q->tins[3], rate >> 2, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
/* bandwidth-sharing weights */
q->tins[0].tin_quantum = quantum;
@@ -2500,7 +2505,7 @@ static int cake_config_diffserv3(struct Qdisc *sch)
*/
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
- u64 rate = q->rate_bps;
+ u64 rate = q->config->rate_bps;
u32 quantum = 1024;
q->tin_cnt = 3;
@@ -2511,11 +2516,11 @@ static int cake_config_diffserv3(struct Qdisc *sch)
/* class characteristics */
cake_set_rate(&q->tins[0], rate, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
cake_set_rate(&q->tins[1], rate >> 4, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
cake_set_rate(&q->tins[2], rate >> 2, mtu,
- us_to_ns(q->target), us_to_ns(q->interval));
+ us_to_ns(q->config->target), us_to_ns(q->config->interval));
/* bandwidth-sharing weights */
q->tins[0].tin_quantum = quantum;
@@ -2527,7 +2532,8 @@ static int cake_config_diffserv3(struct Qdisc *sch)
static void cake_reconfigure(struct Qdisc *sch)
{
- struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
int c, ft;
switch (q->tin_mode) {
@@ -2553,36 +2559,37 @@ static void cake_reconfigure(struct Qdisc *sch)
break;
}
- for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
+ for (c = qd->tin_cnt; c < CAKE_MAX_TINS; c++) {
cake_clear_tin(sch, c);
- q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
+ qd->tins[c].cparams.mtu_time = qd->tins[ft].cparams.mtu_time;
}
- q->rate_ns = q->tins[ft].tin_rate_ns;
- q->rate_shft = q->tins[ft].tin_rate_shft;
+ qd->rate_ns = qd->tins[ft].tin_rate_ns;
+ qd->rate_shft = qd->tins[ft].tin_rate_shft;
if (q->buffer_config_limit) {
- q->buffer_limit = q->buffer_config_limit;
+ qd->buffer_limit = q->buffer_config_limit;
} else if (q->rate_bps) {
u64 t = q->rate_bps * q->interval;
do_div(t, USEC_PER_SEC / 4);
- q->buffer_limit = max_t(u32, t, 4U << 20);
+ qd->buffer_limit = max_t(u32, t, 4U << 20);
} else {
- q->buffer_limit = ~0;
+ qd->buffer_limit = ~0;
}
sch->flags &= ~TCQ_F_CAN_BYPASS;
- q->buffer_limit = min(q->buffer_limit,
- max(sch->limit * psched_mtu(qdisc_dev(sch)),
- q->buffer_config_limit));
+ qd->buffer_limit = min(qd->buffer_limit,
+ max(sch->limit * psched_mtu(qdisc_dev(sch)),
+ q->buffer_config_limit));
}
static int cake_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
struct nlattr *tb[TCA_CAKE_MAX + 1];
u16 rate_flags;
u8 flow_mode;
@@ -2636,19 +2643,19 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
rate_flags |= CAKE_FLAG_OVERHEAD;
- q->max_netlen = 0;
- q->max_adjlen = 0;
- q->min_netlen = ~0;
- q->min_adjlen = ~0;
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
}
if (tb[TCA_CAKE_RAW]) {
rate_flags &= ~CAKE_FLAG_OVERHEAD;
- q->max_netlen = 0;
- q->max_adjlen = 0;
- q->min_netlen = ~0;
- q->min_adjlen = ~0;
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
}
if (tb[TCA_CAKE_MPU])
@@ -2704,7 +2711,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->rate_flags, rate_flags);
WRITE_ONCE(q->flow_mode, flow_mode);
- if (q->tins) {
+ if (qd->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
sch_tree_unlock(sch);
@@ -2720,14 +2727,20 @@ static void cake_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
tcf_block_put(q->block);
kvfree(q->tins);
+ kvfree(q->config);
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q;
int i, j, err;
+ q = kvcalloc(1, sizeof(struct cake_sched_config), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
sch->limit = 10240;
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q->flow_mode = CAKE_FLOW_TRIPLE;
@@ -2739,33 +2752,36 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
* for 5 to 10% of interval
*/
q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
- q->cur_tin = 0;
- q->cur_flow = 0;
+ qd->cur_tin = 0;
+ qd->cur_flow = 0;
+ qd->config = q;
- qdisc_watchdog_init(&q->watchdog, sch);
+ qdisc_watchdog_init(&qd->watchdog, sch);
if (opt) {
err = cake_change(sch, opt, extack);
if (err)
- return err;
+ goto err;
}
- err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
+ err = tcf_block_get(&qd->block, &qd->filter_list, sch, extack);
if (err)
- return err;
+ goto err;
quantum_div[0] = ~0;
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
- q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
- GFP_KERNEL);
- if (!q->tins)
- return -ENOMEM;
+ qd->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
+ GFP_KERNEL);
+ if (!qd->tins) {
+ err = -ENOMEM;
+ goto err;
+ }
for (i = 0; i < CAKE_MAX_TINS; i++) {
- struct cake_tin_data *b = q->tins + i;
+ struct cake_tin_data *b = qd->tins + i;
INIT_LIST_HEAD(&b->new_flows);
INIT_LIST_HEAD(&b->old_flows);
@@ -2781,22 +2797,27 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
INIT_LIST_HEAD(&flow->flowchain);
cobalt_vars_init(&flow->cvars);
- q->overflow_heap[k].t = i;
- q->overflow_heap[k].b = j;
+ qd->overflow_heap[k].t = i;
+ qd->overflow_heap[k].b = j;
b->overflow_idx[j] = k;
}
}
cake_reconfigure(sch);
- q->avg_peak_bandwidth = q->rate_bps;
- q->min_netlen = ~0;
- q->min_adjlen = ~0;
+ qd->avg_peak_bandwidth = q->rate_bps;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
return 0;
+err:
+ kvfree(qd->config);
+ qd->config = NULL;
+ return err;
}
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
struct nlattr *opts;
u16 rate_flags;
u8 flow_mode;
--
2.51.0