/*
 * asm-generic/mutex-dec.h
 *
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 */
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 * from 1 to 0
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}
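
/*
 * Illustrative only: this fastpath is meant to be wrapped by the generic
 * mutex core, which supplies the slowpath. A minimal sketch, assuming a
 * hypothetical __my_lock_slowpath() that queues the caller and sleeps:
 *
 *	static void fastcall __my_lock_slowpath(atomic_t *count);
 *
 *	void my_mutex_lock(struct mutex *lock)
 *	{
 *		// 1 -> 0 is the uncontended 'unlocked' -> 'locked'
 *		// transition; any other starting value takes the slowpath.
 *		__mutex_fastpath_lock(&lock->count, __my_lock_slowpath);
 *	}
 */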

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 * from 1 to 0
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or whatever the slowpath function returns otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}
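
/*
 * Illustrative only: the retval variant lets an interruptible slowpath
 * propagate an error code to the caller. A sketch, assuming a hypothetical
 * __my_lock_interruptible_slowpath() that returns 0 once the lock is
 * taken or -EINTR if a signal arrived first:
 *
 *	static int fastcall __my_lock_interruptible_slowpath(atomic_t *count);
 *
 *	int my_mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		// 0 on uncontended success, otherwise whatever the
 *		// slowpath returned.
 *		return __mutex_fastpath_lock_retval(&lock->count,
 *				__my_lock_interruptible_slowpath);
 *	}
 */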
49 | ||
50 | /** | |
51 | * __mutex_fastpath_unlock - try to promote the count from 0 to 1 | |
52 | * @count: pointer of type atomic_t | |
53 | * @fail_fn: function to call if the original value was not 0 | |
54 | * | |
55 | * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. | |
56 | * In the failure case, this function is allowed to either set the value to | |
57 | * 1, or to set it to a value lower than 1. | |
58 | * | |
59 | * If the implementation sets it to a value of lower than 1, then the | |
60 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | |
61 | * to return 0 otherwise. | |
62 | */ | |
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}
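
/*
 * Illustrative only: the release side pairs with the lock fastpath above.
 * A sketch, assuming a hypothetical __my_unlock_slowpath() that wakes up
 * a waiter:
 *
 *	static void fastcall __my_unlock_slowpath(atomic_t *count);
 *
 *	void my_mutex_unlock(struct mutex *lock)
 *	{
 *		// 0 -> 1 is the uncontended 'locked' -> 'unlocked'
 *		// transition; a negative count means there are waiters.
 *		__mutex_fastpath_unlock(&lock->count, __my_unlock_slowpath);
 *	}
 */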
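
/*
 * The unlock fastpath above may fail with the count left at a value
 * lower than 1, so the unlock slowpath still has to release the lock
 * by setting the count to 1 itself - hence this returns 1 here.
 */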
#define __mutex_slowpath_needs_to_unlock()	1
72 | ||
73 | /** | |
74 | * __mutex_fastpath_trylock - try to acquire the mutex, without waiting | |
75 | * | |
76 | * @count: pointer of type atomic_t | |
77 | * @fail_fn: fallback function | |
78 | * | |
79 | * Change the count from 1 to a value lower than 1, and return 0 (failure) | |
80 | * if it wasn't 1 originally, or return 1 (success) otherwise. This function | |
81 | * MUST leave the value lower than 1 even when the "1" assertion wasn't true. | |
82 | * Additionally, if the value was < 0 originally, this function must not leave | |
83 | * it to 0 on failure. | |
84 | * | |
85 | * If the architecture has no effective trylock variant, it should call the | |
86 | * <fail_fn> spinlock-based trylock variant unconditionally. | |
87 | */ | |
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg-based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock-based variant - that is
	 * just as efficient as (and simpler than) a 'destructive' probing
	 * of the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}
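
/*
 * Illustrative only: a sketch of a non-blocking acquire built on this
 * fastpath, assuming a hypothetical spinlock-based __my_trylock_slowpath()
 * fallback:
 *
 *	static int __my_trylock_slowpath(atomic_t *count);
 *
 *	int my_mutex_trylock(struct mutex *lock)
 *	{
 *		// Returns 1 if the mutex was acquired, 0 if it was busy;
 *		// never sleeps either way.
 *		return __mutex_fastpath_trylock(&lock->count,
 *				__my_trylock_slowpath);
 *	}
 */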
111 | ||
112 | #endif |