[Cake] backward compatibility cruft and possible performance optimization
Dave Taht
dave.taht at gmail.com
Fri Nov 20 07:25:34 EST 2015
I just pushed a change cleaning up the typedefs.
Upcoming for discussion:
I was objecting to carrying around a whole lot of variables that could
otherwise be reached through a pointer to the codel_params structure
they came from. The calling convention on most OSes only allows about
six arguments in registers before the rest get pushed onto the stack;
32-bit x86 is the worst here. On the other hand, other arches have 32
or more registers, and in any case, on the two arches I have built for
(x86_64, ARM), everything gets inlined anyway, so the cost of
dereferencing p->interval, for example, vanishes, as near as I can
tell. (I will go disassemble to verify.)
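Roughly what I mean, as a standalone sketch (hypothetical names, not
code from the patch): with everything static inline, the pointer form
and the pass-each-scalar form compile to the same loads at -O2, so the
pointer costs nothing where it matters.

/* sketch: both forms reduce to the same generated code once inlined */
#include <stdbool.h>
#include <stdint.h>

struct codel_params {
	uint64_t interval;
	uint64_t target;
	uint64_t threshold;
};

/* before: scalars passed one by one; at an out-of-line call on 32-bit
 * x86 most of these would land on the stack */
static inline bool over_target_args(uint64_t sojourn, uint64_t target)
{
	return sojourn >= target;
}

/* after: a single pointer argument; p->target becomes the same memory
 * load once the call is inlined into its caller */
static inline bool over_target_ptr(uint64_t sojourn,
				   const struct codel_params *p)
{
	return sojourn >= p->target;
}

bool check(uint64_t sojourn, const struct codel_params *p)
{
	/* compare the disassembly of these two: identical */
	return over_target_args(sojourn, p->target) ==
	       over_target_ptr(sojourn, p);
}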
Simultaneously (sigh), I started breaking the compatibility cruft out
into a separate file, to make backporting as far back as 3.4 easier.
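For reference, how the guard reads (a sketch; the real one is in the
codel5.h hunk below): KERNEL_VERSION() packs a (major, minor, patch)
triple into the same integer encoding that LINUX_VERSION_CODE uses, so
the comparison selects the compat header only when building against a
kernel older than 4.2.

#include <linux/version.h>

#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
/* pre-4.2 kernel: pull in the backported helpers (kvfree(),
 * ktime_get_ns(), the qdisc_qstats_* accessors, etc.) */
#include "codel5_compat.h"
#endif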
Does anyone have any objection to me making these two changes (in a
cleaner way than the patch below)?
diff --git a/codel5.h b/codel5.h
index d45aa6e..0c29735 100644
--- a/codel5.h
+++ b/codel5.h
@@ -58,66 +58,13 @@
* Implemented on linux by Dave Taht and Eric Dumazet
*/
-/* Backport some stuff if needed.
- */
-#if KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE
-
-static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
-{
- return (u32)(((u64) val * ep_ro) >> 32);
-}
-
-#endif
-
-#if KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE
-
-static inline void kvfree(const void *addr)
-{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
-}
-
-#endif
-
-#if KERNEL_VERSION(3, 17, 0) > LINUX_VERSION_CODE
-
-#define ktime_get_ns() ktime_to_ns(ktime_get())
-
-#endif
-
-#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE
-static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
- const struct sk_buff *skb)
-{
- sch->qstats.backlog -= qdisc_pkt_len(skb);
-}
-
-static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
- const struct sk_buff *skb)
-{
- sch->qstats.backlog += qdisc_pkt_len(skb);
-}
-
-static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
-{
- sch->qstats.drops += count;
-}
-
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
-{
- sch->qstats.drops++;
-}
-
-#define codel_stats_copy_queue(a, b, c, d) gnet_stats_copy_queue(a, c)
-#define codel_watchdog_schedule_ns(a, b, c) qdisc_watchdog_schedule_ns(a, b)
+#if KERNEL_VERSION(4, 2, 0) > LINUX_VERSION_CODE
+#include "codel5_compat.h"
#else
#define codel_stats_copy_queue(a, b, c, d) gnet_stats_copy_queue(a, b, c, d)
#define codel_watchdog_schedule_ns(a, b, c) qdisc_watchdog_schedule_ns(a, b, c)
#endif
-
/* CoDel5 uses a real clock, unlike codel */
typedef u64 codel_time_t;
@@ -192,11 +139,13 @@ struct codel_vars {
u16 drop_count;
u16 ecn_mark;
};
+
/* sizeof_in_bits(rec_inv_sqrt) */
#define REC_INV_SQRT_BITS (8 * sizeof(u16))
/* needed shift to get a Q0.32 number from rec_inv_sqrt */
#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
-#define REC_INV_SQRT_CACHE (16)
+
+#define REC_INV_SQRT_CACHE (16) // fixme cache line fix
/* Newton approximation method needs more iterations at small inputs,
* so cache them.
@@ -232,6 +181,7 @@ static void codel_Newton_step(struct codel_vars *vars)
}
}
+// FIXME for peel threshold
static void codel_cache_init(void)
{
struct codel_vars v;
@@ -239,7 +189,7 @@ static void codel_cache_init(void)
codel_vars_init(&v);
v.rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
codel_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
-
+ //fixme for no cache
for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
codel_Newton_step(&v);
codel_Newton_step(&v);
@@ -267,9 +217,7 @@ static codel_time_t codel_control_law(codel_time_t t,
static bool codel_should_drop(const struct sk_buff *skb,
struct Qdisc *sch,
struct codel_vars *vars,
- codel_time_t interval,
- codel_time_t target,
- codel_time_t threshold,
+ const struct codel_params *p,
codel_time_t now)
{
if (!skb) {
@@ -279,7 +227,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
sch->qstats.backlog -= qdisc_pkt_len(skb);
- if (now - codel_get_enqueue_time(skb) < target ||
+ if (now - codel_get_enqueue_time(skb) < p->target ||
!sch->qstats.backlog) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
@@ -292,14 +240,14 @@ static bool codel_should_drop(const struct sk_buff *skb,
/* just went above from below; mark the time */
vars->first_above_time = now;
- } else if (vars->count > 1 && now - vars->drop_next < 8 * interval) {
+ } else if (vars->count > 1 && now - vars->drop_next < 8 * p->interval) {
/* we were recently dropping; be more aggressive */
return now > codel_control_law(
vars->first_above_time,
- interval,
+ p->interval,
vars->rec_inv_sqrt);
} else if (((now - vars->first_above_time) >> 15) *
- ((now - codel_get_enqueue_time(skb)) >> 15) > threshold) {
+ ((now - codel_get_enqueue_time(skb)) >> 15) > p->threshold) {
return true;
}
@@ -313,9 +261,7 @@ static inline struct sk_buff *custom_dequeue(struct codel_vars *vars,
static struct sk_buff *codel_dequeue(struct Qdisc *sch,
struct codel_vars *vars,
- codel_time_t interval,
- codel_time_t target,
- codel_time_t threshold,
+ struct codel_params *p,
bool overloaded)
{
struct sk_buff *skb = custom_dequeue(vars, sch);
@@ -327,8 +273,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
return skb;
}
now = codel_get_time();
- drop = codel_should_drop(skb, sch, vars, interval, target, threshold,
- now);
+ drop = codel_should_drop(skb, sch, vars, p, now);
if (vars->dropping) {
if (!drop) {
/* sojourn time below target - leave dropping state */
@@ -350,14 +295,14 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
codel_Newton_step(vars);
vars->drop_next = codel_control_law(vars->drop_next,
- interval,
+ p->interval,
vars->rec_inv_sqrt);
do {
if (INET_ECN_set_ce(skb) && !overloaded) {
vars->ecn_mark++;
/* and schedule the next drop */
vars->drop_next = codel_control_law(
- vars->drop_next, interval,
+ vars->drop_next, p->interval,
vars->rec_inv_sqrt);
goto end;
}
@@ -365,16 +310,13 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->drop_count++;
skb = custom_dequeue(vars, sch);
if (skb && !codel_should_drop(skb, sch, vars,
- interval,
- target,
- threshold,
- now)) {
+ p, now)) {
/* leave dropping state */
vars->dropping = false;
} else {
/* schedule the next drop */
vars->drop_next = codel_control_law(vars->drop_next,
- interval, vars->rec_inv_sqrt);
+ p->interval, vars->rec_inv_sqrt);
}
} while (skb && vars->dropping && now >=
vars->drop_next);
@@ -391,9 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->drop_count++;
skb = custom_dequeue(vars, sch);
- drop = codel_should_drop(skb, sch, vars,
- interval, target,
- threshold, now);
+ drop = codel_should_drop(skb, sch, vars, p, now);
if (skb && INET_ECN_set_ce(skb))
vars->ecn_mark++;
}
@@ -403,7 +343,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
* last cycle is a good starting point to control it now.
*/
if (vars->count > 2 &&
- now - vars->drop_next < 8 * interval) {
+ now - vars->drop_next < 8 * p->interval) {
/* when count is halved, time interval is
* multiplied by 1.414...
*/
@@ -415,7 +355,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
}
codel_Newton_step(vars);
- vars->drop_next = codel_control_law(now, interval,
+ vars->drop_next = codel_control_law(now, p->interval,
vars->rec_inv_sqrt);
}
end:
diff --git a/codel5_compat.h b/codel5_compat.h
new file mode 100644
index 0000000..56f83d9
--- /dev/null
+++ b/codel5_compat.h
@@ -0,0 +1,55 @@
+/* Backport some stuff if needed.
+ */
+#if KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE
+
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
+
+#endif
+
+#if KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE
+
+static inline void kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+#endif
+
+#if KERNEL_VERSION(3, 17, 0) > LINUX_VERSION_CODE
+
+#define ktime_get_ns() ktime_to_ns(ktime_get())
+
+#endif
+
+#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+ sch->qstats.drops += count;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+}
+
+#define codel_stats_copy_queue(a, b, c, d) gnet_stats_copy_queue(a, c)
+#define codel_watchdog_schedule_ns(a, b, c) qdisc_watchdog_schedule_ns(a, b)
+#endif
diff --git a/sch_cake.c b/sch_cake.c
index 0466ae4..84652cc 100644
--- a/sch_cake.c
+++ b/sch_cake.c
@@ -118,7 +118,7 @@ struct cake_flow {
struct sk_buff *head;
struct sk_buff *tail;
struct list_head flowchain;
- int deficit;
+ s32 deficit;
u32 dropped; /* Drops (or ECN marks) on this flow */
struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
@@ -152,7 +152,7 @@ struct cake_tin_data {
u16 tin_quantum_prio;
u16 tin_quantum_band;
- int tin_deficit;
+ s32 tin_deficit;
u32 tin_backlog;
u32 tin_dropped;
u32 tin_ecn_mark;
@@ -185,7 +185,7 @@ struct cake_sched_data {
u32 rate_ns;
u32 rate_bps;
u16 rate_flags;
- short rate_overhead;
+ s16 rate_overhead;
u32 interval;
u32 target;
@@ -414,7 +414,7 @@ static inline u32 cake_overhead(struct cake_sched_data *q, u32 in)
}
static inline codel_time_t cake_ewma(codel_time_t avg, codel_time_t sample,
- int shift)
+ u32 shift)
{
avg -= avg >> shift;
avg += sample >> shift;
@@ -494,7 +494,7 @@ static inline void cake_wash_diffserv(struct sk_buff *skb)
static inline unsigned int cake_handle_diffserv(struct sk_buff *skb, u16 wash)
{
- unsigned int dscp;
+ u32 dscp;
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -517,10 +517,10 @@ static inline unsigned int cake_handle_diffserv(struct sk_buff *skb, u16 wash)
static void cake_reconfigure(struct Qdisc *sch);
-static int cake_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
- unsigned int idx, tin;
+ u32 idx, tin;
struct cake_tin_data *b;
struct cake_flow *flow;
u32 len = qdisc_pkt_len(skb);
@@ -563,6 +563,7 @@ static int cake_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* or if we need to know individual packet sizes for framing overhead.
*/
+ // FIXME: We always peel at unlimited speed
if (unlikely((len * max_t(u32, b->bulk_flow_count, 1U) >
q->peel_threshold && skb_is_gso(skb)))) {
struct sk_buff *segs, *nskb;
@@ -718,7 +719,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
u32 prev_drop_count, prev_ecn_mark;
u32 len;
u64 now = ktime_get_ns();
- int i;
+ s32 i;
codel_time_t delay;
begin:
@@ -779,8 +780,7 @@ retry:
prev_drop_count = flow->cvars.drop_count;
prev_ecn_mark = flow->cvars.ecn_mark;
- skb = codel_dequeue(sch, &flow->cvars, b->cparams.interval,
- b->cparams.target, b->cparams.threshold,
+ skb = codel_dequeue(sch, &flow->cvars, &b->cparams,
q->buffer_used >
(q->buffer_limit >> 2) + (q->buffer_limit >> 1));
@@ -838,7 +838,7 @@ retry:
static void cake_reset(struct Qdisc *sch)
{
- int c;
+ u32 c;
for (c = 0; c < CAKE_MAX_TINS; c++)
cake_clear_tin(sch, c);
@@ -900,7 +900,7 @@ static void cake_config_besteffort(struct Qdisc *sch)
struct cake_tin_data *b = &q->tins[0];
u64 rate = q->rate_bps;
u32 mtu = psched_mtu(qdisc_dev(sch));
- unsigned int i;
+ u32 i;
q->tin_cnt = 1;
==
Dave Täht
Let's go make home routers and wifi faster! With better software!
https://www.gofundme.com/savewifi
-------------- next part --------------
A non-text attachment was scrubbed...
Name: cake_ptr.patch
Type: text/x-patch
Size: 11461 bytes
Desc: not available
URL: <https://lists.bufferbloat.net/pipermail/cake/attachments/20151120/eca0c54b/attachment-0002.bin>