/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better sing. AvgQ mode with Grio(WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 09:04:11 +01:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 00:20:36 +02:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <net/pkt_sched.h>
|
2005-11-05 21:14:16 +01:00
|
|
|
#include <net/red.h>
|
2005-04-17 00:20:36 +02:00
|
|
|
|
/* Default priority assigned to a virtual queue when userspace gives none. */
#define GRED_DEF_PRIO (MAX_DPs / 2)

/* Low-order bits of skb->tc_index that select the virtual queue (DP). */
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;
|
|
|
|
|
/* Per-virtual-queue (DP) state: RED parameters/statistics plus the
 * accounting that is local to this DP.
 */
struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters (DP index) */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;		/* RED state/parameters for this VQ */
	struct red_stats stats;		/* RED drop/mark counters for this VQ */
};
|
|
|
|
|
/* Bit numbers for struct gred_sched.flags describing the operating mode. */
enum {
	GRED_WRED_MODE = 1,	/* all VQs share one average queue (WRED) */
	GRED_RIO_MODE,		/* per-priority (grio) operation */
};
|
|
|
|
|
/* Qdisc-wide GRED state: the table of virtual queues plus shared config. */
struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];	/* VQ table, NULL = unconfigured */
	unsigned long	flags;		/* GRED_*_MODE bits */
	u32		red_flags;	/* TC_RED_ECN / TC_RED_HARDDROP */
	u32		DPs;		/* number of configured DPs */
	u32		def;		/* default DP for unclassified skbs */
	struct red_parms wred_set;	/* shared qavg/qidlestart in WRED mode */
};
|
|
|
|
|
/* Test whether the qdisc operates in WRED mode (shared average queue). */
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

/* Switch to WRED mode; caller serializes, hence the non-atomic __set_bit. */
static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

/* Leave WRED mode; caller serializes, hence the non-atomic __clear_bit. */
static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

/* Test whether the qdisc operates in RIO (grio) mode. */
static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

/* Switch to RIO mode; caller serializes, hence the non-atomic __set_bit. */
static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

/* Leave RIO mode; caller serializes, hence the non-atomic __clear_bit. */
static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
|
|
|
|
|
/*
 * Return 1 if at least two configured VQs share the same priority, in
 * which case WRED mode should be enabled (they must share one average
 * queue).  Returns 0 otherwise.
 */
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be necessary too frequent. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		/* Any *other* VQ with the same prio triggers WRED mode. */
		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
|
|
|
|
|
/*
 * Backlog used for average-queue calculation: the whole qdisc's backlog
 * in WRED mode (all VQs share one average), the VQ's own bytes otherwise.
 */
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
|
|
|
|
|
/* Extract the DP (virtual queue) index from skb->tc_index. */
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
|
|
|
|
|
/* Copy the shared WRED average-queue state into a VQ before RED math. */
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

/* Publish a VQ's freshly computed average back into the shared WRED state.
 * Note: only qavg is written back; qidlestart is driven by the
 * red_start_of_idle_period() calls on &t->wred_set in dequeue/drop.
 */
static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}
|
|
|
|
|
/* Non-zero if ECN marking (instead of dropping) was requested. */
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

/* Non-zero if packets above qth_max must be dropped even with ECN on. */
static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
|
|
|
|
|
/*
 * Enqueue entry point: classify the skb to a virtual queue via tc_index,
 * update the (possibly shared) RED average, and enqueue, ECN-mark or drop
 * according to the RED verdict.  Returns a NET_XMIT_* code.
 */
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controvesial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	/* In WRED mode work on the shared average, not the per-VQ one. */
	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	/* For RIO the verdict is taken on the summed average of the
	 * higher-priority VQs plus our own.
	 */
	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	/* RED let the packet through; still honour the hard byte limit. */
	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
|
|
|
|
|
/*
 * Dequeue from the head of the shared queue and credit the skb's bytes
 * back to its virtual queue's backlog.  Starts a RED idle period when the
 * relevant queue (per-VQ, or the shared WRED set) becomes empty.
 */
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		/* The VQ may have been deleted/shrunk since enqueue; then
		 * the backlog accounting for those bytes is lost.
		 */
		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "after dequeue, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	/* Queue fully drained: in WRED mode idle the shared average. */
	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
|
|
|
|
|
/*
 * Qdisc ->drop() hook: remove one packet from the tail, adjust the owning
 * VQ's backlog and "other" drop counter, and return the dropped length
 * (0 if the queue was empty).
 */
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		/* VQ may have vanished since enqueue; backlog bytes lost. */
		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "while dropping, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	/* Nothing queued: in WRED mode idle the shared average. */
	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;

}
|
|
|
|
|
/* Flush all queued packets and restart RED state on every configured VQ. */
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}
|
|
|
|
|
/* Free one virtual queue; kfree(NULL) is safe, so no check needed. */
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
|
|
|
|
|
/*
 * Apply a TCA_GRED_DPS attribute (struct tc_gred_sopt): set the number of
 * DPs, the default DP, the RED flags and the grio/WRED mode, then destroy
 * any VQs that fall outside the new DP range.  Returns 0 or -EINVAL.
 */
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	/* Reap VQs that are now out of range ("shadowed"). */
	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warning("GRED: Warning: Destroying "
				   "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
|
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
static inline int gred_change_vq(struct Qdisc *sch, int dp,
|
|
|
|
struct tc_gred_qopt *ctl, int prio, u8 *stab)
|
2005-11-05 21:14:13 +01:00
|
|
|
{
|
|
|
|
struct gred_sched *table = qdisc_priv(sch);
|
|
|
|
struct gred_sched_data *q;
|
2005-11-05 21:14:15 +01:00
|
|
|
|
|
|
|
if (table->tab[dp] == NULL) {
|
2006-07-21 23:51:30 +02:00
|
|
|
table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
|
2005-11-05 21:14:15 +01:00
|
|
|
if (table->tab[dp] == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
q = table->tab[dp];
|
|
|
|
q->DP = dp;
|
|
|
|
q->prio = prio;
|
|
|
|
q->limit = ctl->limit;
|
2005-11-05 21:14:16 +01:00
|
|
|
|
|
|
|
if (q->backlog == 0)
|
|
|
|
red_end_of_idle_period(&q->parms);
|
|
|
|
|
|
|
|
red_set_parms(&q->parms,
|
|
|
|
ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
|
|
|
|
ctl->Scell_log, stab);
|
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/* Netlink attribute validation policy for TCA_GRED_* options. */
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },	/* RED Stab lookup table */
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
};
|
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
|
2005-11-05 21:14:15 +01:00
|
|
|
{
|
|
|
|
struct gred_sched *table = qdisc_priv(sch);
|
2005-11-05 21:14:13 +01:00
|
|
|
struct tc_gred_qopt *ctl;
|
2008-01-23 07:11:17 +01:00
|
|
|
struct nlattr *tb[TCA_GRED_MAX + 1];
|
2008-01-24 05:33:32 +01:00
|
|
|
int err, prio = GRED_DEF_PRIO;
|
2005-11-05 21:14:15 +01:00
|
|
|
u8 *stab;
|
2005-11-05 21:14:13 +01:00
|
|
|
|
2008-01-24 05:33:32 +01:00
|
|
|
if (opt == NULL)
|
2005-11-05 21:14:13 +01:00
|
|
|
return -EINVAL;
|
|
|
|
|
2008-01-24 05:35:39 +01:00
|
|
|
err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
|
2008-01-24 05:33:32 +01:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
|
2005-11-05 21:14:15 +01:00
|
|
|
return gred_change_table_def(sch, opt);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
if (tb[TCA_GRED_PARMS] == NULL ||
|
2008-01-24 05:35:39 +01:00
|
|
|
tb[TCA_GRED_STAB] == NULL)
|
2005-11-05 21:14:15 +01:00
|
|
|
return -EINVAL;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2008-01-24 05:33:32 +01:00
|
|
|
err = -EINVAL;
|
2008-01-23 07:11:17 +01:00
|
|
|
ctl = nla_data(tb[TCA_GRED_PARMS]);
|
|
|
|
stab = nla_data(tb[TCA_GRED_STAB]);
|
2005-11-05 21:14:14 +01:00
|
|
|
|
|
|
|
if (ctl->DP >= table->DPs)
|
2005-11-05 21:14:15 +01:00
|
|
|
goto errout;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:10 +01:00
|
|
|
if (gred_rio_mode(table)) {
|
2005-11-05 21:14:15 +01:00
|
|
|
if (ctl->prio == 0) {
|
|
|
|
int def_prio = GRED_DEF_PRIO;
|
|
|
|
|
|
|
|
if (table->tab[table->def])
|
|
|
|
def_prio = table->tab[table->def]->prio;
|
|
|
|
|
|
|
|
printk(KERN_DEBUG "GRED: DP %u does not have a prio "
|
|
|
|
"setting default to %d\n", ctl->DP, def_prio);
|
|
|
|
|
|
|
|
prio = def_prio;
|
|
|
|
} else
|
|
|
|
prio = ctl->prio;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
sch_tree_lock(sch);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
|
|
|
|
if (err < 0)
|
|
|
|
goto errout_locked;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:10 +01:00
|
|
|
if (gred_rio_mode(table)) {
|
2005-11-05 21:14:09 +01:00
|
|
|
gred_disable_wred_mode(table);
|
|
|
|
if (gred_wred_mode_check(sch))
|
|
|
|
gred_enable_wred_mode(table);
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
err = 0;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:15 +01:00
|
|
|
errout_locked:
|
|
|
|
sch_tree_unlock(sch);
|
|
|
|
errout:
|
|
|
|
return err;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
/*
 * Qdisc ->init() hook: only a TCA_GRED_DPS table definition is accepted
 * at creation time; per-VQ parameters must come via a later change().
 */
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
|
|
|
|
|
|
|
|
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct gred_sched *table = qdisc_priv(sch);
|
2008-01-23 07:11:17 +01:00
|
|
|
struct nlattr *parms, *opts = NULL;
|
2005-04-17 00:20:36 +02:00
|
|
|
int i;
|
2005-11-05 21:14:12 +01:00
|
|
|
struct tc_gred_sopt sopt = {
|
|
|
|
.DPs = table->DPs,
|
|
|
|
.def_DP = table->def,
|
|
|
|
.grio = gred_rio_mode(table),
|
2005-11-05 21:14:27 +01:00
|
|
|
.flags = table->red_flags,
|
2005-11-05 21:14:12 +01:00
|
|
|
};
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
opts = nla_nest_start(skb, TCA_OPTIONS);
|
|
|
|
if (opts == NULL)
|
|
|
|
goto nla_put_failure;
|
|
|
|
NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
|
|
|
|
parms = nla_nest_start(skb, TCA_GRED_PARMS);
|
|
|
|
if (parms == NULL)
|
|
|
|
goto nla_put_failure;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:11 +01:00
|
|
|
for (i = 0; i < MAX_DPs; i++) {
|
|
|
|
struct gred_sched_data *q = table->tab[i];
|
|
|
|
struct tc_gred_qopt opt;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-11-05 21:14:11 +01:00
|
|
|
memset(&opt, 0, sizeof(opt));
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
if (!q) {
|
|
|
|
/* hack -- fix at some point with proper message
|
|
|
|
This is how we indicate to tc that there is no VQ
|
|
|
|
at this DP */
|
|
|
|
|
2005-11-05 21:14:11 +01:00
|
|
|
opt.DP = MAX_DPs + i;
|
|
|
|
goto append_opt;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
2005-11-05 21:14:11 +01:00
|
|
|
opt.limit = q->limit;
|
|
|
|
opt.DP = q->DP;
|
|
|
|
opt.backlog = q->backlog;
|
|
|
|
opt.prio = q->prio;
|
2005-11-05 21:14:16 +01:00
|
|
|
opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
|
|
|
|
opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
|
|
|
|
opt.Wlog = q->parms.Wlog;
|
|
|
|
opt.Plog = q->parms.Plog;
|
|
|
|
opt.Scell_log = q->parms.Scell_log;
|
|
|
|
opt.other = q->stats.other;
|
|
|
|
opt.early = q->stats.prob_drop;
|
|
|
|
opt.forced = q->stats.forced_drop;
|
|
|
|
opt.pdrop = q->stats.pdrop;
|
2005-11-05 21:14:11 +01:00
|
|
|
opt.packets = q->packetsin;
|
|
|
|
opt.bytesin = q->bytesin;
|
|
|
|
|
2005-11-05 21:14:16 +01:00
|
|
|
if (gred_wred_mode(table)) {
|
|
|
|
q->parms.qidlestart =
|
|
|
|
table->tab[table->def]->parms.qidlestart;
|
|
|
|
q->parms.qavg = table->tab[table->def]->parms.qavg;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
2005-11-05 21:14:11 +01:00
|
|
|
|
2005-11-05 21:14:16 +01:00
|
|
|
opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
|
|
|
|
|
2005-11-05 21:14:11 +01:00
|
|
|
append_opt:
|
2008-01-23 07:11:17 +01:00
|
|
|
if (nla_append(skb, sizeof(opt), &opt) < 0)
|
|
|
|
goto nla_put_failure;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
nla_nest_end(skb, parms);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
return nla_nest_end(skb, opts);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2008-01-23 07:11:17 +01:00
|
|
|
nla_put_failure:
|
2008-06-04 01:36:54 +02:00
|
|
|
nla_nest_cancel(skb, opts);
|
|
|
|
return -EMSGSIZE;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Qdisc ->destroy() hook: free every configured virtual queue. */
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
|
|
|
|
|
/* Qdisc operations table registered with the packet scheduler core. */
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
|
|
|
|
|
|
|
|
/* Module entry point: register the GRED qdisc with the scheduler core. */
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

/* Module exit point: unregister the GRED qdisc. */
static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
|