1 #ifndef __NET_PKT_ACT_H
2 #define __NET_PKT_ACT_H
4 #include <asm/uaccess.h>
5 #include <asm/system.h>
6 #include <linux/bitops.h>
7 #include <linux/config.h>
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
13 #include <linux/socket.h>
14 #include <linux/sockios.h>
16 #include <linux/errno.h>
17 #include <linux/interrupt.h>
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/proc_fs.h>
25 #include <net/pkt_sched.h>
/* Cast helper: expands to a cast to the named per-action state struct,
 * e.g. tca_st(gact) -> (struct tcf_gact *). */
27 #define tca_st(val) (struct tcf_##val *)
/* Fetch the action's private state, cast to struct tcf_<name>. */
28 #define PRIV(a,name) ( tca_st(name) (a)->priv)
/* Debug print macros. Two definitions of each appear below; the #if/#else
 * preprocessor guards that select between the printk variant and the no-op
 * variant fall on lines missing from this listing — presumably keyed on a
 * debug config option (TODO confirm against the full source). */
31 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
33 #define DPRINTK(format,args...)
37 #define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
39 #define D2PRINTK(format,args...)
/* Hash an action index into a bucket of the tcf_ht[] table by masking
 * with MY_TAB_MASK (table size is a power of two).
 * NOTE(review): the function name line, parameter list, and braces fall
 * on lines missing from this listing. */
42 static __inline__ unsigned
45 	return index & MY_TAB_MASK;
48 /* probably move this from being inline
49  * and put into act_generic
/* Unlink action 'p' from its tcf_ht[] hash bucket, walking the bucket's
 * next-pointer chain under tcf_t_lock, and tear down its rate estimator.
 * NOTE(review): the loop body that matches *p1p against p, the unlink
 * assignment, and the kfree fall on lines missing from this listing. */
52 tcf_hash_destroy(struct tcf_st *p)
54 	unsigned h = tcf_hash(p->index);
57 	for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
59 			write_lock_bh(&tcf_t_lock);
61 			write_unlock_bh(&tcf_t_lock);
62 #ifdef CONFIG_NET_ESTIMATOR
		/* Stop the traffic estimator attached to this action's stats. */
63 			gen_kill_estimator(&p->bstats, &p->rate_est);
/* Drop a reference (and a bind count when 'bind' is nonzero) on action
 * 'p'; when both bindcnt and refcnt have fallen to zero the action is
 * destroyed (presumably via tcf_hash_destroy — the decrements, the
 * destroy call, and the return value fall on lines missing from this
 * listing; callers compare the result against ACT_P_DELETED). */
73 tcf_hash_release(struct tcf_st *p, int bind )
81 	if(p->bindcnt <=0 && p->refcnt <= 0) {
/* Netlink dump walker: iterate every action in the tcf_ht[] table under
 * the read lock and emit each one into 'skb' as a nested rtattr
 * (RTA_PUT opens the nest, tcf_action_dump_1 fills it, and the rta_len
 * fixup closes it). Emission stops after TCA_ACT_MAX_PRIO entries per
 * batch; 's_i' presumably carries the resume position from 'cb' — TODO
 * confirm, the cb bookkeeping lines are missing from this listing.
 * NOTE(review): RTA_PUT jumps to an rtattr_failure label that is also
 * not visible here; on dump failure the skb is trimmed back to 'r' to
 * drop the partial attribute. */
90 tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
94 	int err =0, index = -1,i= 0, s_i = 0, n_i = 0;
97 	read_lock(&tcf_t_lock);
101 	for (i = 0; i < MY_TAB_SIZE; i++) {
102 		p = tcf_ht[tcf_hash(i)];
104 		for (; p; p = p->next) {
			/* Open a nested attribute for this action. */
110 			r = (struct rtattr*) skb->tail;
111 			RTA_PUT(skb, a->order, 0, NULL);
112 			err = tcf_action_dump_1(skb, a, 0, 0);
			/* On error: undo the partially-written attribute. */
115 				skb_trim(skb, (u8*)r - skb->data);
			/* Patch the nest length now that the payload is in. */
118 			r->rta_len = skb->tail - (u8*)r;
120 			if (n_i >= TCA_ACT_MAX_PRIO) {
126 	read_unlock(&tcf_t_lock);
132 	skb_trim(skb, (u8*)r - skb->data);
/* Flush walker: delete every action of this kind from the tcf_ht[]
 * table, releasing each entry's reference and dropping the module
 * reference for each action actually destroyed. Emits a summary
 * attribute into 'skb' containing the action kind and the count of
 * deleted entries ('n_i' — its increment falls on a missing line).
 * NOTE(review): the inner bucket loop using 's_p', the rtattr_failure
 * label, and the return statements are missing from this listing. */
136 static __inline__ int
137 tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
139 	struct tcf_st *p, *s_p;
	/* Open a nested attribute and record the action kind. */
143 	r = (struct rtattr*) skb->tail;
144 	RTA_PUT(skb, a->order, 0, NULL);
145 	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
146 	for (i = 0; i < MY_TAB_SIZE; i++) {
147 		p = tcf_ht[tcf_hash(i)];
151 			if (ACT_P_DELETED == tcf_hash_release(p, 0)) {
				/* Action gone: drop the ref pinning its module. */
152 				module_put(a->ops->owner);
	/* Report how many actions were flushed, then close the nest. */
158 	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
159 	r->rta_len = skb->tail - (u8*)r;
	/* Failure path: drop the partially-written attribute. */
163 	skb_trim(skb, (u8*)r - skb->data);
/* Generic walker entry point shared by actions using this hash table:
 * dispatch on the rtnetlink message type — RTM_DELACTION flushes the
 * table, RTM_GETACTION dumps it. Unknown types log an error (the error
 * return statement falls on a line missing from this listing). */
167 static __inline__ int
168 tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
171 	if (type == RTM_DELACTION) {
172 		return tcf_del_walker(skb,a);
173 	} else if (type == RTM_GETACTION) {
174 		return tcf_dump_walker(skb,cb,a);
176 		printk("tcf_generic_walker: unknown action %d\n",type);
/* Look up an action by its index: scan the matching tcf_ht[] bucket
 * under the read lock and stop at the first entry whose index matches.
 * Returns the entry or NULL (the break/return lines fall on lines
 * missing from this listing). */
181 static __inline__ struct tcf_st *
182 tcf_hash_lookup(u32 index)
186 	read_lock(&tcf_t_lock);
187 	for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
188 		if (p->index == index)
191 	read_unlock(&tcf_t_lock);
/* Allocate a fresh, currently-unused action index: keep advancing the
 * global idx_gen counter (the increment/wrap logic falls on lines
 * missing from this listing) until tcf_hash_lookup() finds no entry
 * with that index, then return it. */
195 static __inline__ u32
196 tcf_hash_new_index(void)
201 	} while (tcf_hash_lookup(idx_gen));
/* Search for an action by index and, if found, attach it to 'a'
 * (presumably via a->priv; the assignment and return-value lines are
 * missing from this listing — callers treat the result as found/not). */
208 tcf_hash_search(struct tc_action *a, u32 index)
210 	struct tcf_st *p = tcf_hash_lookup(index);
219 #ifdef CONFIG_NET_ACT_INIT
/* Check whether an action with the given index already exists; when it
 * does, adjust its bind/ref counts per 'bind'/'ovr' and hook it to 'a'
 * (that bookkeeping falls on lines missing from this listing). Returns
 * the existing entry or NULL when the caller must create a new one. */
220 static inline struct tcf_st *
221 tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind)
223 	struct tcf_st *p = NULL;
224 	if (index && (p = tcf_hash_lookup(index)) != NULL) {
/* Allocate and initialize a new action of 'size' bytes: set up its
 * spinlock and stats lock, assign the requested index (or mint a fresh
 * one via tcf_hash_new_index when index is 0 — GNU "?:" extension),
 * stamp install/lastuse times, optionally start a rate estimator, and
 * attach the new state to a->priv.
 * NOTE(review): the NULL check after kmalloc, the memset, and the
 * refcnt/bindcnt initialization presumably sit on lines missing from
 * this listing — verify against the full source. */
234 static inline struct tcf_st *
235 tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
237 	struct tcf_st *p = NULL;
239 	p = kmalloc(size, GFP_KERNEL);
250 	spin_lock_init(&p->lock);
251 	p->stats_lock = &p->lock;
	/* index==0 means "pick one for me". */
252 	p->index = index ? : tcf_hash_new_index();
253 	p->tm.install = jiffies;
254 	p->tm.lastuse = jiffies;
255 #ifdef CONFIG_NET_ESTIMATOR
	/* 'est' carries estimator parameters from userspace; the guard
	 * testing it is on a missing line. */
257 		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
259 	a->priv = (void *) p;
/* Insert action 'p' at the head of its tcf_ht[] hash bucket under the
 * write lock (the next-pointer splice lines are missing from this
 * listing). */
263 static inline void tcf_hash_insert(struct tcf_st *p)
265 	unsigned h = tcf_hash(p->index);
267 	write_lock_bh(&tcf_t_lock);
270 	write_unlock_bh(&tcf_t_lock);