--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -371,16 +371,50 @@ static const u8 prio2band[TC_PRIO_MAX+1]
 #define PFIFO_FAST_BANDS 3
 
+struct pfifo_fast_sched_data {
+	struct tcf_proto *filter_list;
+	struct sk_buff_head list[PFIFO_FAST_BANDS];
+};
+
 static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
 					     struct Qdisc *qdisc)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+	struct sk_buff_head *list = q->list;
 
 	return list + prio2band[skb->priority & TC_PRIO_MAX];
 }
 
+static int pfifo_fast_filter(struct sk_buff *skb, struct Qdisc* qdisc)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+	int result = 0, ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+	struct tcf_result res;
+
+	if (q->filter_list != NULL)
+		result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+		switch (result) {
+		case TC_ACT_STOLEN:
+		case TC_ACT_QUEUED:
+			ret = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			kfree_skb(skb);
+			return ret;
+		}
+	}
+#endif
+	return 0;
+}
+
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
 	struct sk_buff_head *list = prio2list(skb, qdisc);
+	int ret;
+
+	ret = pfifo_fast_filter(skb, qdisc);
+	if (ret)
+		return ret;
 
 	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
 		qdisc->q.qlen++;
@@ -392,8 +426,9 @@ static int pfifo_fast_enqueue(struct sk_
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+	struct sk_buff_head *list = q->list;
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 		if (!skb_queue_empty(list + prio)) {
@@ -420,8 +455,9 @@ static struct sk_buff *pfifo_fast_peek(s
 
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+	struct sk_buff_head *list = q->list;
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
 		__qdisc_reset_queue(qdisc, list + prio);
@@ -444,8 +480,9 @@ nla_put_failure:
 
 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+	struct sk_buff_head *list = q->list;
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
 		skb_queue_head_init(list + prio);
@@ -453,9 +490,36 @@ static int pfifo_fast_init(struct Qdisc
 	return 0;
 }
 
+static int pfifo_fast_change_class(struct Qdisc *qdisc, u32 classid, u32 parentid,
+				   struct nlattr **tca, unsigned long *arg)
+{
+	return -EOPNOTSUPP;
+}
+
+static unsigned long pfifo_fast_get(struct Qdisc *qdisc, u32 classid)
+{
+	return 0;
+}
+
+static struct tcf_proto **pfifo_fast_find_tcf(struct Qdisc *qdisc, unsigned long cl)
+{
+	struct pfifo_fast_sched_data *q = qdisc_priv(qdisc);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static const struct Qdisc_class_ops pfifo_fast_class_ops = {
+	.get		= pfifo_fast_get,
+	.change		= pfifo_fast_change_class,
+	.tcf_chain	= pfifo_fast_find_tcf,
+};
+
 static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.cl_ops		=	&pfifo_fast_class_ops,
+	.priv_size	=	sizeof(struct pfifo_fast_sched_data),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
@@ -735,3 +799,18 @@ void dev_shutdown(struct net_device *dev
 	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
+
+#ifdef CONFIG_NET_SCHED
+static int __init sch_generic_init(void)
+{
+	return register_qdisc(&pfifo_fast_ops);
+}
+
+static void __exit sch_generic_exit(void)
+{
+	unregister_qdisc(&pfifo_fast_ops);
+}
+
+module_init(sch_generic_init)
+module_exit(sch_generic_exit)
+#endif
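
A note on how the new hooks are consumed (illustrative, not part of the patch): pfifo_fast exposes no real classes, so .change can simply return -EOPNOTSUPP and .get can return 0; all the filter core needs in order to graft a classifier chain onto the qdisc is .tcf_chain. The sketch below loosely follows the tc_ctl_tfilter() flow of this kernel generation to show why that is enough; locate_chain() is a made-up helper name used only for exposition, and error handling is omitted.

/*
 * Sketch only: roughly how the filter core reaches the new class ops
 * when "tc filter add ... parent <qdisc>" targets a pfifo_fast
 * instance.  locate_chain() is hypothetical; the real logic lives in
 * tc_ctl_tfilter() in net/sched/cls_api.c.
 */
static struct tcf_proto **locate_chain(struct Qdisc *q, u32 parent)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	unsigned long cl = 0;

	if (cops == NULL || cops->tcf_chain == NULL)
		return NULL;			/* qdisc cannot hold filters */

	if (TC_H_MIN(parent))
		cl = cops->get(q, parent);	/* pfifo_fast_get(): always 0 */

	/*
	 * cl == 0 means "the qdisc itself": pfifo_fast_find_tcf() hands
	 * back &q->filter_list, which pfifo_fast_filter() then walks via
	 * tc_classify() on every enqueue.
	 */
	return cops->tcf_chain(q, cl);
}

With pfifo_fast also registered through register_qdisc(), attaching a filter should then work with ordinary tc usage (an explicit pfifo_fast root with a handle, plus e.g. a u32 filter carrying an action), assuming the iproute2 build in use accepts pfifo_fast as an explicit qdisc kind.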