linux-2.6: include/asm-m68k/semaphore-helper.h
#ifndef _M68K_SEMAPHORE_HELPER_H
#define _M68K_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * m68k version by Andreas Schwab
 */

#include <linux/errno.h>

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore *sem)
{
        atomic_inc(&sem->waking);
}
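
/*
 * Illustrative sketch, not part of the original header (compiled out):
 * how an up()-style path would pair wake_one_more() with the
 * waking_non_zero*() consumers below.  example_up() is a hypothetical
 * name, and sem->wait is assumed to be the semaphore's wait queue.
 */
#if 0
static inline void example_up(struct semaphore *sem)
{
        if (atomic_inc_return(&sem->count) <= 0) {
                /* A sleeper exists: grant it a waking token... */
                wake_one_more(sem);
                /* ...and wake it so it can consume the token. */
                wake_up(&sem->wait);
        }
}
#endif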

#ifndef CONFIG_RMW_INSNS
extern spinlock_t semaphore_wake_lock;
#endif

/*
 * waking_non_zero:
 *      1       got the lock
 *      0       go to sleep
 */
static inline int waking_non_zero(struct semaphore *sem)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 0;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /*
         * Lock-free path: atomically decrement sem->waking if it is
         * positive.  %1 (tmp1) holds the expected old value, %2 (tmp2)
         * the new one; casl reloads %1 on failure, so we retry at 1:.
         */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"        /* tmp2 = current waking count */
           "    jle     2f\n"           /* <= 0: no token to take */
           "    subql   #1,%2\n"        /* tmp2 = count - 1 */
           "    casl    %1,%2,%3\n"     /* if unchanged, store tmp2 */
           "    jne     1b\n"           /* lost a race, retry */
           "    moveq   #1,%0\n"        /* success: ret = 1 */
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "0" (0), "1" (sem->waking));
#endif

        return ret;
}
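
/*
 * For reference only, not original code (compiled out): the
 * CONFIG_RMW_INSNS asm above is roughly equivalent to this C loop
 * written with atomic_cmpxchg().  The function name is illustrative.
 */
#if 0
static inline int waking_non_zero_cmpxchg(struct semaphore *sem)
{
        int old;

        do {
                old = atomic_read(&sem->waking);
                if (old <= 0)
                        return 0;               /* no token available */
        } while (atomic_cmpxchg(&sem->waking, old, old - 1) != old);

        return 1;                               /* consumed one token */
}
#endif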

/*
 * waking_non_zero_interruptible:
 *      1       got the lock
 *      0       go to sleep
 *      -EINTR  interrupted
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 0;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 1;
        } else if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /*
         * Same cas loop as in waking_non_zero(), but on success we
         * jump straight to the "next" label (%a4 is its address taken
         * via &&next) so the signal check is skipped.
         */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"        /* tmp2 = current waking count */
           "    jle     2f\n"           /* <= 0: fall through to signals */
           "    subql   #1,%2\n"
           "    casl    %1,%2,%3\n"     /* if unchanged, store tmp2 */
           "    jne     1b\n"           /* lost a race, retry */
           "    moveq   #1,%0\n"        /* success: ret = 1 */
           "    jra     %a4\n"          /* skip the signal check */
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
        if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
next:
#endif

        return ret;
}
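
/*
 * Illustrative sketch, not original code (compiled out): how a
 * down_interruptible()-style slow path could drive the helper above.
 * The function name, and the use of sem->wait as the wait queue, are
 * assumptions for the example.
 */
#if 0
static int example_down_interruptible(struct semaphore *sem)
{
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        add_wait_queue_exclusive(&sem->wait, &wait);
        for (;;) {
                int state;

                set_current_state(TASK_INTERRUPTIBLE);
                state = waking_non_zero_interruptible(sem, current);
                if (state) {
                        if (state < 0)
                                ret = state;    /* -EINTR: interrupted */
                        break;                  /* 1: got the lock */
                }
                schedule();                     /* 0: go to sleep */
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);
        return ret;
}
#endif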

/*
 * waking_non_zero_trylock:
 *      1       failed to lock
 *      0       got the lock
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
        int ret;
#ifndef CONFIG_RMW_INSNS
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        ret = 1;
        if (atomic_read(&sem->waking) > 0) {
                atomic_dec(&sem->waking);
                ret = 0;
        } else
                atomic_inc(&sem->count);
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
        int tmp1, tmp2;

        /*
         * Same cas loop again, with the return convention inverted
         * (0 = got the lock); on failure the count is given back
         * instead of sleeping.
         */
        __asm__ __volatile__
          ("1:  movel   %1,%2\n"        /* tmp2 = current waking count */
           "    jle     2f\n"           /* <= 0: no token, fail */
           "    subql   #1,%2\n"
           "    casl    %1,%2,%3\n"     /* if unchanged, store tmp2 */
           "    jne     1b\n"           /* lost a race, retry */
           "    moveq   #0,%0\n"        /* success: ret = 0 */
           "2:"
           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
           : "m" (sem->waking), "0" (1), "1" (sem->waking));
        if (ret)
                atomic_inc(&sem->count);
#endif
        return ret;
}
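
/*
 * Illustrative sketch, not original code (compiled out): a
 * down_trylock()-style caller.  Note the inverted convention of the
 * helper above: 0 means the lock was taken.  The name is hypothetical.
 */
#if 0
static inline int example_down_trylock(struct semaphore *sem)
{
        /* Take a count; if it went negative, try to steal a token. */
        if (atomic_dec_return(&sem->count) < 0)
                return waking_non_zero_trylock(sem);
        return 0;
}
#endif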

#endif /* _M68K_SEMAPHORE_HELPER_H */