/* linux/include/asm-arm/semaphore-helper.h */

#ifndef ASMARM_SEMAPHORE_HELPER_H
#define ASMARM_SEMAPHORE_HELPER_H

/*
 * wake_one_more() and waking_non_zero() _must_ execute atomically
 * with respect to each other: semaphore_wake_lock serialises them.
 */
static inline void wake_one_more(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        /* A sleeper exists (count <= 0): grant it one waking credit. */
        if (atomic_read(&sem->count) <= 0)
                sem->waking++;
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}

static inline int waking_non_zero(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->waking > 0) {
                /* Consume a waking credit: the semaphore is ours. */
                sem->waking--;
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}

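/*
 * Illustrative sketch, not part of the original header: how an up()
 * slow path is expected to pair with these helpers.  The struct
 * semaphore layout { atomic_t count; int waking; wait_queue_head_t
 * wait; } and the name example_up are assumptions for illustration.
 */
#if 0
static inline void example_up(struct semaphore *sem)
{
        /* A non-positive count after the increment means sleepers exist. */
        if (atomic_inc_return(&sem->count) <= 0) {
                wake_one_more(sem);     /* grant one waking credit...        */
                wake_up(&sem->wait);    /* ...and wake a sleeper to claim it */
        }
}
#endif  /* example */
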
/*
 * waking_non_zero_interruptible:
 *      1       got the lock
 *      0       go to sleep
 *      -EINTR  interrupted
 *
 * We must undo the sem->count decrement made by down_interruptible()
 * while we are protected by the spinlock, so that this atomic_inc()
 * is atomic with the atomic_read() in wake_one_more(); otherwise we
 * can race. -arca
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->waking > 0) {
                /* Consume a waking credit: the semaphore is ours. */
                sem->waking--;
                ret = 1;
        } else if (signal_pending(tsk)) {
                /* Interrupted: give back the count down_interruptible() took. */
                atomic_inc(&sem->count);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}

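/*
 * Illustrative sketch, not part of the original header: the classic
 * __down_interruptible() loop the three return codes above are meant
 * for.  example_down_interruptible is a hypothetical name.
 */
#if 0
static inline int example_down_interruptible(struct semaphore *sem)
{
        int ret = 0;

        if (atomic_dec_return(&sem->count) < 0) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue(&sem->wait, &wait);
                for (;;) {
                        int got;

                        set_current_state(TASK_INTERRUPTIBLE);
                        got = waking_non_zero_interruptible(sem, current);
                        if (got > 0)
                                break;          /* 1: got the lock */
                        if (got < 0) {
                                ret = got;      /* -EINTR: count already undone */
                                break;
                        }
                        schedule();             /* 0: go to sleep */
                }
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&sem->wait, &wait);
        }
        return ret;
}
#endif  /* example */
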
/*
 * waking_non_zero_trylock:
 *      1       failed to lock
 *      0       got the lock
 *
 * We must undo the sem->count decrement made by down_trylock() while
 * we are protected by the spinlock, so that this atomic_inc() is
 * atomic with the atomic_read() in wake_one_more(); otherwise we can
 * race. -arca
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int ret = 1;

        spin_lock_irqsave(&semaphore_wake_lock, flags);
        if (sem->waking <= 0) {
                /* No credit pending: undo the decrement and fail. */
                atomic_inc(&sem->count);
        } else {
                /* Consume a waking credit: the semaphore is ours. */
                sem->waking--;
                ret = 0;
        }
        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
        return ret;
}
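
/*
 * Illustrative sketch, not part of the original header: a
 * down_trylock()-style caller (0 = acquired, 1 = already held).
 * example_down_trylock is a hypothetical name.
 */
#if 0
static inline int example_down_trylock(struct semaphore *sem)
{
        if (atomic_dec_return(&sem->count) < 0)
                /* Contended: consume a pending waking credit (0) or
                 * put the count back and report failure (1). */
                return waking_non_zero_trylock(sem);
        return 0;
}
#endif  /* example */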

#endif  /* ASMARM_SEMAPHORE_HELPER_H */