/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
16 static u8 builtin_ccids[] = {
17 DCCPC_CCID2, /* CCID2 is supported by default */
18 #if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE)
23 static struct ccid_operations *ccids[CCID_MAX];
24 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
25 static atomic_t ccids_lockct = ATOMIC_INIT(0);
26 static DEFINE_SPINLOCK(ccids_lock);
29 * The strategy is: modifications ccids vector are short, do not sleep and
30 * veeery rare, but read access should be free of any exclusive locks.
32 static void ccids_write_lock(void)
34 spin_lock(&ccids_lock);
35 while (atomic_read(&ccids_lockct) != 0) {
36 spin_unlock(&ccids_lock);
38 spin_lock(&ccids_lock);
42 static inline void ccids_write_unlock(void)
44 spin_unlock(&ccids_lock);
47 static inline void ccids_read_lock(void)
49 atomic_inc(&ccids_lockct);
50 smp_mb__after_atomic_inc();
51 spin_unlock_wait(&ccids_lock);
54 static inline void ccids_read_unlock(void)
56 atomic_dec(&ccids_lockct);
60 #define ccids_write_lock() do { } while(0)
61 #define ccids_write_unlock() do { } while(0)
62 #define ccids_read_lock() do { } while(0)
63 #define ccids_read_unlock() do { } while(0)
66 static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
68 struct kmem_cache *slab;
69 char slab_name_fmt[32], *slab_name;
73 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
76 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
77 if (slab_name == NULL)
79 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
80 SLAB_HWCACHE_ALIGN, NULL);
86 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
89 const char *name = kmem_cache_name(slab);
91 kmem_cache_destroy(slab);
96 /* check that up to @array_len members in @ccid_array are supported */
97 bool ccid_support_check(u8 const *ccid_array, u8 array_len)
101 for (i = 0, found = 0; i < array_len; i++, found = 0) {
102 for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
103 found = (ccid_array[i] == builtin_ccids[j]);
111 * ccid_get_builtin_ccids - Provide copy of `builtin' CCID array
112 * @ccid_array: pointer to copy into
113 * @array_len: value to return length into
114 * This function allocates memory - caller must see that it is freed after use.
116 int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
118 *ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
119 if (*ccid_array == NULL)
121 *array_len = ARRAY_SIZE(builtin_ccids);
125 int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
126 char __user *optval, int __user *optlen)
128 if (len < sizeof(builtin_ccids))
131 if (put_user(sizeof(builtin_ccids), optlen) ||
132 copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
137 int ccid_register(struct ccid_operations *ccid_ops)
141 ccid_ops->ccid_hc_rx_slab =
142 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
145 if (ccid_ops->ccid_hc_rx_slab == NULL)
148 ccid_ops->ccid_hc_tx_slab =
149 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
152 if (ccid_ops->ccid_hc_tx_slab == NULL)
153 goto out_free_rx_slab;
157 if (ccids[ccid_ops->ccid_id] == NULL) {
158 ccids[ccid_ops->ccid_id] = ccid_ops;
161 ccids_write_unlock();
163 goto out_free_tx_slab;
165 pr_info("CCID: Registered CCID %d (%s)\n",
166 ccid_ops->ccid_id, ccid_ops->ccid_name);
170 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
171 ccid_ops->ccid_hc_tx_slab = NULL;
174 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
175 ccid_ops->ccid_hc_rx_slab = NULL;
179 EXPORT_SYMBOL_GPL(ccid_register);
181 int ccid_unregister(struct ccid_operations *ccid_ops)
184 ccids[ccid_ops->ccid_id] = NULL;
185 ccids_write_unlock();
187 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
188 ccid_ops->ccid_hc_tx_slab = NULL;
189 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
190 ccid_ops->ccid_hc_rx_slab = NULL;
192 pr_info("CCID: Unregistered CCID %d (%s)\n",
193 ccid_ops->ccid_id, ccid_ops->ccid_name);
197 EXPORT_SYMBOL_GPL(ccid_unregister);
199 struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
201 struct ccid_operations *ccid_ops;
202 struct ccid *ccid = NULL;
205 #ifdef CONFIG_MODULES
206 if (ccids[id] == NULL) {
207 /* We only try to load if in process context */
209 if (gfp & GFP_ATOMIC)
211 request_module("net-dccp-ccid-%d", id);
215 ccid_ops = ccids[id];
216 if (ccid_ops == NULL)
219 if (!try_module_get(ccid_ops->ccid_owner))
224 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
225 ccid_ops->ccid_hc_tx_slab, gfp);
228 ccid->ccid_ops = ccid_ops;
230 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
231 if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
232 ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
235 memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
236 if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
237 ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
246 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
247 ccid_ops->ccid_hc_tx_slab, ccid);
250 module_put(ccid_ops->ccid_owner);
254 EXPORT_SYMBOL_GPL(ccid_new);
256 static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
258 struct ccid_operations *ccid_ops;
263 ccid_ops = ccid->ccid_ops;
265 if (ccid_ops->ccid_hc_rx_exit != NULL)
266 ccid_ops->ccid_hc_rx_exit(sk);
267 kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
269 if (ccid_ops->ccid_hc_tx_exit != NULL)
270 ccid_ops->ccid_hc_tx_exit(sk);
271 kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
274 if (ccids[ccid_ops->ccid_id] != NULL)
275 module_put(ccid_ops->ccid_owner);
/* Destroy the receiver-side half-connection CCID of @sk. */
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 1);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);
/* Destroy the sender-side half-connection CCID of @sk. */
void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 0);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);