linux-2.6: drivers/connector/cn_queue.c
/*
 *	cn_queue.c
 *
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

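/*
 * Work function run from the per-device workqueue: it invokes the
 * registered callback, runs the destructor on ->ddata and finally
 * frees the buffer saved in ->free.
 */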
void cn_queue_wrapper(void *data)
{
        struct cn_callback_data *d = data;

        d->callback(d->callback_priv);

        d->destruct_data(d->ddata);
        d->ddata = NULL;

        kfree(d->free);
}

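/*
 * Allocate and initialise a callback entry: copy the name and cb_id,
 * remember the callback pointer and prepare the work item that will
 * run cn_queue_wrapper() with this entry's data.
 */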
static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                printk(KERN_ERR "Failed to create new callback queue.\n");
                return NULL;
        }

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;

        INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
        return cbq;
}

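/*
 * Tear down a callback entry: cancel any pending work, wait for the
 * owning device's workqueue to drain, then free the entry.
 */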
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
        cancel_delayed_work(&cbq->work);
        flush_workqueue(cbq->pdev->cn_queue);

        kfree(cbq);
}

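/* Two connector ids are equal when both their idx and val fields match. */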
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

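/*
 * Register a new callback on @dev.  A device reference is taken up
 * front and dropped again if an entry with the same id already exists,
 * in which case -EINVAL is returned.  Returns 0 on success and -ENOMEM
 * if the entry cannot be allocated.
 */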
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(name, id, callback);
        if (!cbq)
                return -ENOMEM;

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                atomic_dec(&dev->refcnt);
                cn_queue_free_callback(cbq);
                return -EINVAL;
        }

        cbq->nls = dev->nls;
        cbq->seq = 0;
        cbq->group = cbq->id.id.idx;

        return 0;
}

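/*
 * Unregister the callback matching @id: unlink it under the queue lock,
 * then free the entry and drop the device reference taken when it was
 * added.
 */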
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
        }
}

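/*
 * Allocate a connector queue device: initialise its callback list, lock
 * and refcount, remember the netlink socket and create a workqueue named
 * after the device.
 */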
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);

        dev->nls = nls;
        dev->netlink_groups = 0;

        dev->cn_queue = create_workqueue(dev->name);
        if (!dev->cn_queue) {
                kfree(dev);
                return NULL;
        }

        return dev;
}

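/*
 * Destroy a queue device: flush and destroy its workqueue, unlink any
 * remaining callback entries and poll once a second until the refcount
 * drops to zero before freeing the device itself.
 */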
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;

        flush_workqueue(dev->cn_queue);
        destroy_workqueue(dev->cn_queue);

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));
                msleep(1000);
        }

        kfree(dev);
        dev = NULL;
}