/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */
15 static void queue_submit(struct gdlm_lock *lp)
17 struct gdlm_ls *ls = lp->ls;
19 spin_lock(&ls->async_lock);
20 list_add_tail(&lp->delay_list, &ls->submit);
21 spin_unlock(&ls->async_lock);
22 wake_up(&ls->thread_wait);
25 static void process_blocking(struct gdlm_lock *lp, int bast_mode)
27 struct gdlm_ls *ls = lp->ls;
30 switch (gdlm_make_lmstate(bast_mode)) {
41 gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
44 ls->fscb(ls->fsdata, cb, &lp->lockname);
47 static void process_complete(struct gdlm_lock *lp)
49 struct gdlm_ls *ls = lp->ls;
50 struct lm_async_cb acb;
51 int16_t prev_mode = lp->cur;
53 memset(&acb, 0, sizeof(acb));
55 if (lp->lksb.sb_status == -DLM_ECANCEL) {
56 log_info("complete dlm cancel %x,%llx flags %lx",
57 lp->lockname.ln_type, lp->lockname.ln_number,
61 acb.lc_ret |= LM_OUT_CANCELED;
62 if (lp->cur == DLM_LOCK_IV)
67 if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
68 if (lp->lksb.sb_status != -DLM_EUNLOCK) {
69 log_info("unlock sb_status %d %x,%llx flags %lx",
70 lp->lksb.sb_status, lp->lockname.ln_type,
71 lp->lockname.ln_number, lp->flags);
75 lp->cur = DLM_LOCK_IV;
76 lp->req = DLM_LOCK_IV;
79 if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
86 if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
87 memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
89 if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
90 if (lp->req == DLM_LOCK_PR)
91 lp->req = DLM_LOCK_CW;
92 else if (lp->req == DLM_LOCK_CW)
93 lp->req = DLM_LOCK_PR;
97 * A canceled lock request. The lock was just taken off the delayed
98 * list and was never even submitted to dlm.
101 if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
102 log_info("complete internal cancel %x,%llx",
103 lp->lockname.ln_type, lp->lockname.ln_number);
105 acb.lc_ret |= LM_OUT_CANCELED;
113 if (lp->lksb.sb_status) {
114 /* a "normal" error */
115 if ((lp->lksb.sb_status == -EAGAIN) &&
116 (lp->lkf & DLM_LKF_NOQUEUE)) {
118 if (lp->cur == DLM_LOCK_IV)
119 lp->lksb.sb_lkid = 0;
123 /* this could only happen with cancels I think */
124 log_info("ast sb_status %d %x,%llx flags %lx",
125 lp->lksb.sb_status, lp->lockname.ln_type,
126 lp->lockname.ln_number, lp->flags);
131 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
134 if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
135 complete(&lp->ast_wait);
140 * A lock has been demoted to NL because it initially completed during
141 * BLOCK_LOCKS. Now it must be requested in the originally requested
145 if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
146 gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
147 lp->lockname.ln_type, lp->lockname.ln_number);
148 gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
149 lp->lockname.ln_type, lp->lockname.ln_number);
151 lp->cur = DLM_LOCK_NL;
152 lp->req = lp->prev_req;
153 lp->prev_req = DLM_LOCK_IV;
154 lp->lkf &= ~DLM_LKF_CONVDEADLK;
156 set_bit(LFL_NOCACHE, &lp->flags);
158 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
159 !test_bit(LFL_NOBLOCK, &lp->flags))
160 gdlm_queue_delayed(lp);
167 * A request is granted during dlm recovery. It may be granted
168 * because the locks of a failed node were cleared. In that case,
169 * there may be inconsistent data beneath this lock and we must wait
170 * for recovery to complete to use it. When gfs recovery is done this
171 * granted lock will be converted to NL and then reacquired in this
175 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
176 !test_bit(LFL_NOBLOCK, &lp->flags) &&
177 lp->req != DLM_LOCK_NL) {
180 lp->prev_req = lp->req;
181 lp->req = DLM_LOCK_NL;
182 lp->lkf |= DLM_LKF_CONVERT;
183 lp->lkf &= ~DLM_LKF_CONVDEADLK;
185 log_debug("rereq %x,%llx id %x %d,%d",
186 lp->lockname.ln_type, lp->lockname.ln_number,
187 lp->lksb.sb_lkid, lp->cur, lp->req);
189 set_bit(LFL_REREQUEST, &lp->flags);
195 * DLM demoted the lock to NL before it was granted so GFS must be
196 * told it cannot cache data for this lock.
199 if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
200 set_bit(LFL_NOCACHE, &lp->flags);
204 * This is an internal lock_dlm lock
207 if (test_bit(LFL_INLOCK, &lp->flags)) {
208 clear_bit(LFL_NOBLOCK, &lp->flags);
210 complete(&lp->ast_wait);
215 * Normal completion of a lock request. Tell GFS it now has the lock.
218 clear_bit(LFL_NOBLOCK, &lp->flags);
221 acb.lc_name = lp->lockname;
222 acb.lc_ret |= gdlm_make_lmstate(lp->cur);
224 if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
225 (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
226 acb.lc_ret |= LM_OUT_CACHEABLE;
228 ls->fscb(ls->fsdata, LM_CB_ASYNC, &acb);
231 static inline int no_work(struct gdlm_ls *ls, int blocking)
235 spin_lock(&ls->async_lock);
236 ret = list_empty(&ls->complete) && list_empty(&ls->submit);
238 ret = list_empty(&ls->blocking);
239 spin_unlock(&ls->async_lock);
244 static inline int check_drop(struct gdlm_ls *ls)
246 if (!ls->drop_locks_count)
249 if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
250 ls->drop_time = jiffies;
251 if (ls->all_locks_count >= ls->drop_locks_count)
257 static int gdlm_thread(void *data)
259 struct gdlm_ls *ls = (struct gdlm_ls *) data;
260 struct gdlm_lock *lp = NULL;
262 uint8_t complete, blocking, submit, drop;
263 DECLARE_WAITQUEUE(wait, current);
265 /* Only thread1 is allowed to do blocking callbacks since gfs
266 may wait for a completion callback within a blocking cb. */
268 if (current == ls->thread1)
271 while (!kthread_should_stop()) {
272 set_current_state(TASK_INTERRUPTIBLE);
273 add_wait_queue(&ls->thread_wait, &wait);
274 if (no_work(ls, blist))
276 remove_wait_queue(&ls->thread_wait, &wait);
277 set_current_state(TASK_RUNNING);
279 complete = blocking = submit = drop = 0;
281 spin_lock(&ls->async_lock);
283 if (blist && !list_empty(&ls->blocking)) {
284 lp = list_entry(ls->blocking.next, struct gdlm_lock,
286 list_del_init(&lp->blist);
287 blocking = lp->bast_mode;
289 } else if (!list_empty(&ls->complete)) {
290 lp = list_entry(ls->complete.next, struct gdlm_lock,
292 list_del_init(&lp->clist);
294 } else if (!list_empty(&ls->submit)) {
295 lp = list_entry(ls->submit.next, struct gdlm_lock,
297 list_del_init(&lp->delay_list);
301 drop = check_drop(ls);
302 spin_unlock(&ls->async_lock);
305 process_complete(lp);
308 process_blocking(lp, blocking);
314 ls->fscb(ls->fsdata, LM_CB_DROPLOCKS, NULL);
322 int gdlm_init_threads(struct gdlm_ls *ls)
324 struct task_struct *p;
327 p = kthread_run(gdlm_thread, ls, "lock_dlm1");
330 log_error("can't start lock_dlm1 thread %d", error);
335 p = kthread_run(gdlm_thread, ls, "lock_dlm2");
338 log_error("can't start lock_dlm2 thread %d", error);
339 kthread_stop(ls->thread1);
347 void gdlm_release_threads(struct gdlm_ls *ls)
349 kthread_stop(ls->thread1);
350 kthread_stop(ls->thread2);