/*
 * INETPEER - A storage for permanent information about peers
 *
 * Version: $Id: inetpeer.h,v 1.2 2002/01/12 07:54:56 davem Exp $
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */
9 #ifndef _NET_INETPEER_H
10 #define _NET_INETPEER_H
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/jiffies.h>
15 #include <linux/spinlock.h>
16 #include <asm/atomic.h>
20 struct inet_peer *avl_left, *avl_right;
21 struct inet_peer *unused_next, **unused_prevp;
22 unsigned long dtime; /* the time of last use of not
23 * referenced entries */
25 __u32 v4daddr; /* peer's address */
27 __u16 ip_id_count; /* IP ID for the next packet */
28 atomic_t rid; /* Frag reception counter */
30 unsigned long tcp_ts_stamp;
33 void inet_initpeers(void) __init;
35 /* can be called with or without local BH being disabled */
36 struct inet_peer *inet_getpeer(__u32 daddr, int create);
38 extern spinlock_t inet_peer_unused_lock;
39 extern struct inet_peer **inet_peer_unused_tailp;
40 /* can be called from BH context or outside */
41 static inline void inet_putpeer(struct inet_peer *p)
43 spin_lock_bh(&inet_peer_unused_lock);
44 if (atomic_dec_and_test(&p->refcnt)) {
45 p->unused_prevp = inet_peer_unused_tailp;
46 p->unused_next = NULL;
47 *inet_peer_unused_tailp = p;
48 inet_peer_unused_tailp = &p->unused_next;
51 spin_unlock_bh(&inet_peer_unused_lock);
54 extern spinlock_t inet_peer_idlock;
55 /* can be called with or without local BH being disabled */
56 static inline __u16 inet_getid(struct inet_peer *p, int more)
60 spin_lock_bh(&inet_peer_idlock);
62 p->ip_id_count += 1 + more;
63 spin_unlock_bh(&inet_peer_idlock);
67 #endif /* _NET_INETPEER_H */