/*
 * Locks for smp ppc
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/ppc_asm.h>
#include <asm/smp.h>

#ifdef CONFIG_DEBUG_SPINLOCK

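/*
 * Number of inner busy-wait iterations a CPU spins before the debug
 * code below prints a "stuck" diagnostic and resets its counter.
 */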
#undef INIT_STUCK
#define INIT_STUCK 200000000 /*0xffffffff*/

/*
 * Try to acquire a spinlock.
 * Only does the stwcx. if the load returned 0 - the Programming
 * Environments Manual suggests not doing unnecessary stwcx.'s
 * since they may inhibit forward progress by other CPUs in getting
 * a lock.
 */
static inline unsigned long __spin_trylock(volatile unsigned long *lock)
{
	unsigned long ret;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	1b\n\
	isync\n\
2:"
	: "=&r"(ret)
	: "r"(lock), "r"(1)
	: "cr0", "memory");

	return ret;
}

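/*
 * Debug version of spin_lock: spin until the lock is free, and every
 * INIT_STUCK iterations of the inner busy-wait print a diagnostic
 * identifying this CPU, the caller's NIP and the recorded holder's
 * CPU and PC, then record ourselves as the new owner.
 */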
void _raw_spin_lock(spinlock_t *lock)
{
	int cpu = smp_processor_id();
	unsigned int stuck = INIT_STUCK;

	while (__spin_trylock(&lock->lock)) {
		while ((volatile unsigned long)lock->lock != 0) {
			if (!--stuck) {
				printk("_spin_lock(%p) CPU#%d NIP %p"
				       " holder: cpu %ld pc %08lX\n",
				       lock, cpu, __builtin_return_address(0),
				       lock->owner_cpu, lock->owner_pc);
				stuck = INIT_STUCK;
				/* steal the lock */
				/*xchg_u32((void *)&lock->lock,0);*/
			}
		}
	}
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	lock->owner_cpu = cpu;
}
EXPORT_SYMBOL(_raw_spin_lock);

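/*
 * Debug version of spin_trylock: returns 1 and records the owner CPU
 * and PC on success, 0 if the lock was already held.
 */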
int _raw_spin_trylock(spinlock_t *lock)
{
	if (__spin_trylock(&lock->lock))
		return 0;
	lock->owner_cpu = smp_processor_id();
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	return 1;
}
EXPORT_SYMBOL(_raw_spin_trylock);

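/*
 * Debug version of spin_unlock: warn if the lock is not held, or is
 * being released by a CPU other than the recorded owner, then clear
 * the owner fields and release the lock.
 */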
void _raw_spin_unlock(spinlock_t *lp)
{
	if ( !lp->lock )
		printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
		       lp, smp_processor_id(), __builtin_return_address(0),
		       current->comm, current->pid);
	if ( lp->owner_cpu != smp_processor_id() )
		printk("_spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %lx\n",
		       lp, smp_processor_id(), (int)lp->owner_cpu,
		       lp->owner_pc, lp->lock);
	lp->owner_pc = lp->owner_cpu = 0;
	wmb();
	lp->lock = 0;
}
EXPORT_SYMBOL(_raw_spin_unlock);

/*
 * For rwlocks, zero is unlocked, -1 is write-locked,
 * positive is read-locked.
 */
static __inline__ int __read_trylock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# __read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return tmp;
}

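/*
 * Debug version of read_trylock: returns 1 if __read_trylock() yielded
 * a positive reader count, 0 if the lock is write-locked.
 */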
int _raw_read_trylock(rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
EXPORT_SYMBOL(_raw_read_trylock);

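/*
 * Debug version of read_lock: retry __read_trylock() until it succeeds,
 * printing a diagnostic every INIT_STUCK iterations spent waiting for
 * the writer to go away.
 */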
void _raw_read_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (__read_trylock(rw) <= 0) {
		stuck = INIT_STUCK;
		while (!read_can_lock(rw)) {
			if (--stuck == 0) {
				printk("_read_lock(%p) CPU#%d lock %d\n",
				       rw, raw_smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
}
EXPORT_SYMBOL(_raw_read_lock);

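/*
 * Debug version of read_unlock: warn if the lock is not held at all,
 * then atomically drop the reader count.
 */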
void _raw_read_unlock(rwlock_t *rw)
{
	if ( rw->lock == 0 )
		printk("_read_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	atomic_dec((atomic_t *) &(rw)->lock);
}
EXPORT_SYMBOL(_raw_read_unlock);

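/*
 * Debug version of write_lock: claim the lock with cmpxchg(0 -> -1),
 * printing a diagnostic every INIT_STUCK iterations spent waiting for
 * readers or another writer to release it.
 */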
void _raw_write_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (cmpxchg(&rw->lock, 0, -1) != 0) {
		stuck = INIT_STUCK;
		while (!write_can_lock(rw)) {
			if (--stuck == 0) {
				printk("_write_lock(%p) CPU#%d lock %d\n",
				       rw, raw_smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
	wmb();
}
EXPORT_SYMBOL(_raw_write_lock);

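/*
 * Debug version of write_trylock: a single cmpxchg(0 -> -1) attempt;
 * returns 1 on success, 0 if the lock is held by readers or a writer.
 */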
int _raw_write_trylock(rwlock_t *rw)
{
	if (cmpxchg(&rw->lock, 0, -1) != 0)
		return 0;
	wmb();
	return 1;
}
EXPORT_SYMBOL(_raw_write_trylock);

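/*
 * Debug version of write_unlock: warn if the lock is not write-locked
 * (lock >= 0), then release it.
 */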
void _raw_write_unlock(rwlock_t *rw)
{
	if (rw->lock >= 0)
		printk("_write_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	rw->lock = 0;
}
EXPORT_SYMBOL(_raw_write_unlock);

#endif /* CONFIG_DEBUG_SPINLOCK */