#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
        atomic_inc((atomic_t *)&sem->sleepers);
}

static __inline__ int waking_non_zero(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers > 0) {
                sem->sleepers--;
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
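
/*
 * Illustrative sketch only (compiled out, not part of this header):
 * roughly how a 2.4-style semaphore slow path would pair the two
 * helpers above.  __up() publishes one wakeup token with
 * wake_one_more() and then wakes a waiter; the waiter loops until
 * waking_non_zero() lets it consume that token.  The function bodies
 * below are assumptions for illustration, not the real
 * arch/sh/kernel/semaphore.c.
 */
#if 0
void __up(struct semaphore *sem)
{
        wake_one_more(sem);             /* publish one wakeup token */
        wake_up(&sem->wait);            /* kick a sleeper on the wait queue */
}

void __down(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue(&sem->wait, &wait);
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (!waking_non_zero(sem)) { /* no token yet: keep sleeping */
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }
        set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);
}
#endif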

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible() decrement while we are
 * protected by the spinlock in order to make this atomic_inc() atomic with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers > 0) {
                sem->sleepers--;
                ret = 1;
        } else if (signal_pending(tsk)) {
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
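
/*
 * Illustrative sketch only (compiled out): roughly how a 2.4-style
 * __down_interruptible() slow path would consume the three return
 * values documented above.  On -EINTR the helper has already given the
 * count reservation back under the spinlock, so the caller only has to
 * stop waiting.  This is an assumption-laden sketch, not the real arch
 * code.
 */
#if 0
int __down_interruptible(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        int ret, result = 0;

        add_wait_queue(&sem->wait, &wait);
        set_task_state(tsk, TASK_INTERRUPTIBLE);
        while ((ret = waking_non_zero_interruptible(sem, tsk)) == 0) {
                schedule();             /* 0: no wakeup token yet, keep sleeping */
                set_task_state(tsk, TASK_INTERRUPTIBLE);
        }
        if (ret == -EINTR)              /* signal: count already fixed up by helper */
                result = -EINTR;
        set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);
        return result;                  /* ret == 1 meant we got the semaphore */
}
#endif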

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count down_trylock() decrement while we are
 * protected by the spinlock in order to make this atomic_inc() atomic with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 1;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->sleepers <= 0)
                atomic_inc(&sem->count);
        else {
                sem->sleepers--;
                ret = 0;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
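
/*
 * Illustrative sketch only (compiled out): a 2.4-style down_trylock()
 * slow path would simply report the helper's answer; on failure the
 * helper has already returned the count reservation under the
 * spinlock.  The wrapper name below is an assumption for illustration.
 */
#if 0
int __down_trylock(struct semaphore *sem)
{
        return waking_non_zero_trylock(sem);    /* 1 = failed, 0 = acquired */
}
#endif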

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */