* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key);
+ lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
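
The only functional change in this hunk is the extra subclass argument to
lockdep_init_map(); the rwsem initializer passes 0 (no subclass). A minimal
sketch of how a caller reaches this path, assuming the usual init_rwsem()
wrapper and CONFIG_DEBUG_LOCK_ALLOC (the module code and the name
example_sem are hypothetical):

	#include <linux/module.h>
	#include <linux/rwsem.h>

	static struct rw_semaphore example_sem;	/* hypothetical lock */

	static int __init example_init(void)
	{
		/* init_rwsem() supplies the name string and a static
		 * lock_class_key, then calls __init_rwsem(), which in turn
		 * calls lockdep_init_map(&sem->dep_map, name, key, 0)
		 * as shown above. */
		init_rwsem(&example_sem);
		return 0;
	}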
/*
* get a read lock on the semaphore
*/
-void fastcall __sched __down_read(struct rw_semaphore *sem)
+void __sched __down_read(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-int fastcall __down_read_trylock(struct rw_semaphore *sem)
+int __down_read_trylock(struct rw_semaphore *sem)
{
unsigned long flags;
int ret = 0;
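
__down_read() sleeps until the read lock can be granted, while
__down_read_trylock() returns 1 on success and 0 on contention without ever
sleeping. Callers normally go through the down_read()/down_read_trylock()/
up_read() wrappers; a brief sketch, reusing the hypothetical example_sem from
above:

	down_read(&example_sem);		/* __down_read(): may sleep */
	/* ... read-side critical section ... */
	up_read(&example_sem);

	if (down_read_trylock(&example_sem)) {	/* __down_read_trylock(): 1 on success */
		/* ... read-side critical section, acquired without sleeping ... */
		up_read(&example_sem);
	}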
* get a write lock on the semaphore
* - we increment the waiting count anyway to indicate an exclusive lock
*/
-void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
;
}
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void __sched __down_write(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
}
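
After this change __down_write() is simply __down_write_nested() with
subclass 0; the subclass only matters to lockdep when two rwsems of the same
lock class are held nested. A sketch using the down_write()/
down_write_nested() wrappers (parent_sem and child_sem are hypothetical
locks of the same class):

	down_write(&example_sem);		/* subclass 0 via __down_write_nested() */
	/* ... write-side critical section ... */
	up_write(&example_sem);

	/* Nesting two locks of the same class needs an explicit subclass
	 * so lockdep does not report a false self-deadlock: */
	down_write(&parent_sem);
	down_write_nested(&child_sem, SINGLE_DEPTH_NESTING);
	/* ... */
	up_write(&child_sem);
	up_write(&parent_sem);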
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-int fastcall __down_write_trylock(struct rw_semaphore *sem)
+int __down_write_trylock(struct rw_semaphore *sem)
{
unsigned long flags;
int ret = 0;
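
As on the read side, __down_write_trylock() returns 1 if the exclusive lock
was taken and 0 on contention, and never sleeps. A sketch of the usual
caller pattern via down_write_trylock() (again using the hypothetical
example_sem):

	if (down_write_trylock(&example_sem)) {
		/* got the exclusive lock without sleeping */
		/* ... write-side critical section ... */
		up_write(&example_sem);
	} else {
		/* contended: fall back or retry later */
	}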
/*
* release a read lock on the semaphore
*/
-void fastcall __up_read(struct rw_semaphore *sem)
+void __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
/*
* release a write lock on the semaphore
*/
-void fastcall __up_write(struct rw_semaphore *sem)
+void __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
-void fastcall __downgrade_write(struct rw_semaphore *sem)
+void __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
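
__downgrade_write() turns a held write lock into a read lock and wakes the
readers queued at the front, so the holder can keep reading while other
readers proceed concurrently. A sketch via the downgrade_write() wrapper
(hypothetical caller):

	down_write(&example_sem);
	/* ... modify the protected data ... */
	downgrade_write(&example_sem);	/* __downgrade_write(): now held for read */
	/* ... continue reading; other readers may now run ... */
	up_read(&example_sem);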