
[PKT_SCHED] netem: packet corruption option

Here is a new feature for netem in 2.6.16: the ability to randomly
corrupt packets. A version was done by Hagen Paul Pfeifer, but I redid
it to handle backwards compatibility with the netlink interface and the
presence of hardware checksum offload. It is useful for testing hardware
offload in devices.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stephen Hemminger 2005-12-21 19:03:44 -08:00 committed by David S. Miller
parent 8cbb512e50
commit c865e5d99e
2 changed files with 53 additions and 3 deletions

include/linux/pkt_sched.h

@@ -429,6 +429,7 @@ enum
 	TCA_NETEM_CORR,
 	TCA_NETEM_DELAY_DIST,
 	TCA_NETEM_REORDER,
+	TCA_NETEM_CORRUPT,
 
 	__TCA_NETEM_MAX,
 };
@@ -457,6 +458,12 @@ struct tc_netem_reorder
 	__u32	correlation;
 };
 
+struct tc_netem_corrupt
+{
+	__u32	probability;
+	__u32	correlation;
+};
+
 #define NETEM_DIST_SCALE	8192
 
 #endif
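
Aside (not part of the patch): both fields of the new struct tc_netem_corrupt are fixed-point fractions in which ~0U (UINT32_MAX) stands for 100%; that is the scaling iproute2's tc applies when it parses percentages. A minimal userspace sketch of encoding a percentage this way and filling the struct; the helper name is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: netem probabilities are fixed-point u32 values
 * where UINT32_MAX represents 100%. */
static uint32_t percent_to_netem_prob(double percent)
{
	if (percent < 0.0)
		percent = 0.0;
	if (percent > 100.0)
		percent = 100.0;
	return (uint32_t)(percent / 100.0 * (double)UINT32_MAX);
}

int main(void)
{
	struct tc_netem_corrupt {	/* local mirror of the UAPI struct above */
		uint32_t probability;
		uint32_t correlation;
	} corrupt;

	corrupt.probability = percent_to_netem_prob(0.1);	/* corrupt 0.1% of packets */
	corrupt.correlation = percent_to_netem_prob(25.0);	/* 25% correlation */
	printf("probability=%u correlation=%u\n",
	       corrupt.probability, corrupt.correlation);
	return 0;
}

With iproute2 this corresponds to something like: tc qdisc add dev eth0 root netem corrupt 0.1% 25%.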

net/sched/sch_netem.c

@@ -25,7 +25,7 @@
 #include <net/pkt_sched.h>
 
-#define VERSION "1.1"
+#define VERSION "1.2"
 
 /*	Network Emulation Queuing algorithm.
	====================================
@@ -65,11 +65,12 @@ struct netem_sched_data {
 	u32 jitter;
 	u32 duplicate;
 	u32 reorder;
+	u32 corrupt;
 
 	struct crndstate {
 		unsigned long last;
 		unsigned long rho;
-	} delay_cor, loss_cor, dup_cor, reorder_cor;
+	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 
 	struct disttable {
 		u32  size;
@@ -183,6 +184,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->duplicate = dupsave;
 	}
 
+	/*
+	 * Randomized packet corruption.
+	 * Make copy if needed since we are modifying it.
+	 * If packet is going to be hardware checksummed, then
+	 * do it now in software before we mangle it.
+	 */
+	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
+		    || (skb->ip_summed == CHECKSUM_HW
+			&& skb_checksum_help(skb, 0))) {
+			sch->qstats.drops++;
+			return NET_XMIT_DROP;
+		}
+
+		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+	}
+
 	if (q->gap == 0		/* not doing reordering */
 	    || q->counter < q->gap	/* inside last reordering gap */
 	    || q->reorder < get_crandom(&q->reorder_cor)) {
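
Aside (not part of the patch): the corruption decision above, q->corrupt >= get_crandom(&q->corrupt_cor), reuses netem's existing correlated random source, so a nonzero correlation makes corrupted packets cluster in bursts. A rough userspace model of that first-order blend, assuming each draw mixes fresh randomness with the previous draw weighted by rho; this approximates the idea, not the kernel's exact arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough model of netem's crndstate: rho near UINT32_MAX means each
 * value closely tracks the previous one, making decisions bursty. */
struct crnd_model {
	uint32_t last;
	uint32_t rho;
};

static uint32_t rnd32(void)
{
	/* 32 bits pieced together from rand(); demo-quality randomness */
	return ((uint32_t)(rand() & 0xffff) << 16) | (uint32_t)(rand() & 0xffff);
}

static uint32_t get_crandom_model(struct crnd_model *s)
{
	uint64_t fresh = rnd32();
	uint64_t rho = (uint64_t)s->rho + 1;

	if (s->rho == 0)		/* no correlation: plain random */
		return (uint32_t)fresh;

	s->last = (uint32_t)((fresh * ((1ULL << 32) - rho) +
			      (uint64_t)s->last * rho) >> 32);
	return s->last;
}

int main(void)
{
	struct crnd_model cor;
	uint32_t prob = (uint32_t)(0.10 * (double)UINT32_MAX);	/* fire ~10% */
	int hits = 0;

	srand(42);
	cor.rho = (uint32_t)(0.90 * (double)UINT32_MAX);	/* strong correlation */
	cor.last = rnd32();

	for (int i = 0; i < 100000; i++)
		if (prob >= get_crandom_model(&cor))
			hits++;

	printf("fired %d times out of 100000\n", hits);
	return 0;
}

Note that the hits arrive in runs rather than independently, and that blending also skews the long-run firing rate away from the configured probability as rho grows; that is a side effect of this kind of scheme, not a bug in the model.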
@@ -382,6 +400,20 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
 	return 0;
 }
 
+static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	const struct tc_netem_corrupt *r = RTA_DATA(attr);
+
+	if (RTA_PAYLOAD(attr) != sizeof(*r))
+		return -EINVAL;
+
+	q->corrupt = r->probability;
+	init_crandom(&q->corrupt_cor, r->correlation);
+	return 0;
+}
+
 /* Parse netlink message to set options */
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -432,13 +464,19 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 			if (ret)
 				return ret;
 		}
 
 		if (tb[TCA_NETEM_REORDER-1]) {
 			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
 			if (ret)
 				return ret;
 		}
-	}
+
+		if (tb[TCA_NETEM_CORRUPT-1]) {
+			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
+			if (ret)
+				return ret;
+		}
+	}
 
 	return 0;
 }
@@ -564,6 +602,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct tc_netem_qopt qopt;
 	struct tc_netem_corr cor;
 	struct tc_netem_reorder reorder;
+	struct tc_netem_corrupt corrupt;
 
 	qopt.latency = q->latency;
 	qopt.jitter = q->jitter;
@@ -582,6 +621,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	reorder.correlation = q->reorder_cor.rho;
 	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
 
+	corrupt.probability = q->corrupt;
+	corrupt.correlation = q->corrupt_cor.rho;
+	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+
 	rta->rta_len = skb->tail - b;
 
 	return skb->len;
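
Aside (not part of the patch): the corruption itself is a single bit flipped at a random offset within the packet's linear data (skb_headlen()). skb_unshare() first guarantees a private copy, and for CHECKSUM_HW packets skb_checksum_help() computes the checksum in software beforehand, so checksum offload hardware cannot silently repair the mangled byte later. The same single-bit-flip technique on an ordinary buffer, as a self-contained sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Flip one randomly chosen bit in buf[0..len-1], mirroring
 * skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); */
static void corrupt_one_bit(uint8_t *buf, size_t len)
{
	if (len == 0)
		return;
	buf[rand() % len] ^= 1u << (rand() % 8);
}

int main(void)
{
	uint8_t pkt[16];

	srand((unsigned)time(NULL));
	memset(pkt, 0, sizeof(pkt));
	corrupt_one_bit(pkt, sizeof(pkt));

	/* exactly one bit of the hex dump below will be set */
	for (size_t i = 0; i < sizeof(pkt); i++)
		printf("%02x%s", pkt[i], i == sizeof(pkt) - 1 ? "\n" : " ");
	return 0;
}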