/*
 *  linux/arch/m32r/semaphore.c
 *    orig : i386 2.6.4
 *
 *  M32R semaphore implementation.
 *
 *      Copyright (c) 2002 - 2004 Hitoshi Yamamoto
 */

/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */

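/*
 * For orientation: the inline fast paths live in <asm/semaphore.h>,
 * not here.  Conceptually (a rough sketch only, not the actual m32r
 * implementation) they behave like:
 *
 *      down(sem):      if (atomic_dec_return(&sem->count) < 0)
 *                              __down(sem);
 *      up(sem):        if (atomic_inc_return(&sem->count) <= 0)
 *                              __up(sem);
 *
 * so the __down*()/__up() routines below are only reached when the
 * count has gone negative, i.e. when there is contention.
 */
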
asmlinkage void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

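/*
 * __down() is the contention path of down(): by the time we get here
 * the inline code has already decremented sem->count below zero, so
 * we queue ourselves as an exclusive waiter and sleep until the count
 * can be made non-negative on our behalf.
 */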
asmlinkage void __sched __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_UNINTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * the wait_queue_head.
                 */
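                /*
                 * Put differently: "sleepers" is the number of
                 * slow-path tasks whose decrement is currently
                 * reflected in "count".  Adding sleepers - 1
                 * normalizes that to at most one pending decrement
                 * (our own); if the result is non-negative, the
                 * semaphore was free and it is now ours.
                 */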
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_UNINTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);
        tsk->state = TASK_RUNNING;
}

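/*
 * __down_interruptible() is the same slow path as __down(), except
 * that the task sleeps in TASK_INTERRUPTIBLE state and backs out
 * with -EINTR when a signal is pending, restoring the count before
 * it returns.
 */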
asmlinkage int __sched __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_INTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
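                /*
                 * Note that below we add back the full "sleepers",
                 * not sleepers - 1: since we are giving up, our own
                 * decrement of the count has to be undone as well.
                 */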
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * wait_queue_head. The "-1" is because we're
                 * still hoping to get the semaphore.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_INTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);

        tsk->state = TASK_RUNNING;
        return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
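/*
 * Note: this slow path always returns 1 (i.e. "trylock failed") -
 * the inline fast path has already failed to take the semaphore.
 * All we do here is fold the sleeper bookkeeping back into the
 * count and, if that leaves the count non-negative, wake a waiter
 * so the free slot is not lost.
 */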
asmlinkage int __down_trylock(struct semaphore * sem)
{
        int sleepers;
        unsigned long flags;

        spin_lock_irqsave(&sem->wait.lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock in the
         * wait_queue_head.
         */
        if (!atomic_add_negative(sleepers, &sem->count)) {
                wake_up_locked(&sem->wait);
        }

        spin_unlock_irqrestore(&sem->wait.lock, flags);
        return 1;
}