/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/freezer.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/call.h>
23 static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
24 static DECLARE_COMPLETION(rxrpc_krxiod_dead);
26 static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);
28 static LIST_HEAD(rxrpc_krxiod_transportq);
29 static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);
31 static LIST_HEAD(rxrpc_krxiod_callq);
32 static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);
34 static volatile int rxrpc_krxiod_die;
36 /*****************************************************************************/
40 static int rxrpc_krxiod(void *arg)
42 DECLARE_WAITQUEUE(krxiod,current);
44 printk("Started krxiod %d\n",current->pid);
48 /* loop around waiting for work to do */
50 /* wait for work or to be told to exit */
51 _debug("### Begin Wait");
52 if (!atomic_read(&rxrpc_krxiod_qcount)) {
53 set_current_state(TASK_INTERRUPTIBLE);
55 add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
58 set_current_state(TASK_INTERRUPTIBLE);
59 if (atomic_read(&rxrpc_krxiod_qcount) ||
61 signal_pending(current))
67 remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
68 set_current_state(TASK_RUNNING);
70 _debug("### End Wait");
72 /* do work if been given some to do */
73 _debug("### Begin Work");
75 /* see if there's a transport in need of attention */
76 if (!list_empty(&rxrpc_krxiod_transportq)) {
77 struct rxrpc_transport *trans = NULL;
79 spin_lock_irq(&rxrpc_krxiod_transportq_lock);
81 if (!list_empty(&rxrpc_krxiod_transportq)) {
83 rxrpc_krxiod_transportq.next,
84 struct rxrpc_transport,
87 list_del_init(&trans->krxiodq_link);
88 atomic_dec(&rxrpc_krxiod_qcount);
90 /* make sure it hasn't gone away and doesn't go
92 if (atomic_read(&trans->usage)>0)
93 rxrpc_get_transport(trans);
98 spin_unlock_irq(&rxrpc_krxiod_transportq_lock);
101 rxrpc_trans_receive_packet(trans);
102 rxrpc_put_transport(trans);
106 /* see if there's a call in need of attention */
107 if (!list_empty(&rxrpc_krxiod_callq)) {
108 struct rxrpc_call *call = NULL;
110 spin_lock_irq(&rxrpc_krxiod_callq_lock);
112 if (!list_empty(&rxrpc_krxiod_callq)) {
113 call = list_entry(rxrpc_krxiod_callq.next,
116 list_del_init(&call->rcv_krxiodq_lk);
117 atomic_dec(&rxrpc_krxiod_qcount);
119 /* make sure it hasn't gone away and doesn't go
121 if (atomic_read(&call->usage) > 0) {
123 " Begin Attend Call %p", call);
124 rxrpc_get_call(call);
131 spin_unlock_irq(&rxrpc_krxiod_callq_lock);
134 rxrpc_call_do_stuff(call);
135 rxrpc_put_call(call);
136 _debug("@@@ KRXIOD End Attend Call %p", call);
140 _debug("### End Work");
144 /* discard pending signals */
145 rxrpc_discard_my_signals();
147 } while (!rxrpc_krxiod_die);
150 complete_and_exit(&rxrpc_krxiod_dead, 0);
152 } /* end rxrpc_krxiod() */
154 /*****************************************************************************/
156 * start up a krxiod daemon
158 int __init rxrpc_krxiod_init(void)
160 return kernel_thread(rxrpc_krxiod, NULL, 0);
162 } /* end rxrpc_krxiod_init() */
164 /*****************************************************************************/
166 * kill the krxiod daemon and wait for it to complete
168 void rxrpc_krxiod_kill(void)
170 rxrpc_krxiod_die = 1;
171 wake_up_all(&rxrpc_krxiod_sleepq);
172 wait_for_completion(&rxrpc_krxiod_dead);
174 } /* end rxrpc_krxiod_kill() */
176 /*****************************************************************************/
178 * queue a transport for attention by krxiod
180 void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
186 if (list_empty(&trans->krxiodq_link)) {
187 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
189 if (list_empty(&trans->krxiodq_link)) {
190 if (atomic_read(&trans->usage) > 0) {
191 list_add_tail(&trans->krxiodq_link,
192 &rxrpc_krxiod_transportq);
193 atomic_inc(&rxrpc_krxiod_qcount);
197 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
198 wake_up_all(&rxrpc_krxiod_sleepq);
203 } /* end rxrpc_krxiod_queue_transport() */
205 /*****************************************************************************/
207 * dequeue a transport from krxiod's attention queue
209 void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
215 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
216 if (!list_empty(&trans->krxiodq_link)) {
217 list_del_init(&trans->krxiodq_link);
218 atomic_dec(&rxrpc_krxiod_qcount);
220 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
224 } /* end rxrpc_krxiod_dequeue_transport() */
226 /*****************************************************************************/
228 * queue a call for attention by krxiod
230 void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
234 if (list_empty(&call->rcv_krxiodq_lk)) {
235 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
236 if (atomic_read(&call->usage) > 0) {
237 list_add_tail(&call->rcv_krxiodq_lk,
238 &rxrpc_krxiod_callq);
239 atomic_inc(&rxrpc_krxiod_qcount);
241 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
243 wake_up_all(&rxrpc_krxiod_sleepq);
245 } /* end rxrpc_krxiod_queue_call() */
247 /*****************************************************************************/
249 * dequeue a call from krxiod's attention queue
251 void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
255 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
256 if (!list_empty(&call->rcv_krxiodq_lk)) {
257 list_del_init(&call->rcv_krxiodq_lk);
258 atomic_dec(&rxrpc_krxiod_qcount);
260 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
262 } /* end rxrpc_krxiod_dequeue_call() */