jbd2: fix the way the b_modified flag is cleared
1 /*
2  * linux/fs/jbd2/transaction.c
3  *
4  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5  *
6  * Copyright 1998 Red Hat corp --- All Rights Reserved
7  *
8  * This file is part of the Linux kernel and is made available under
9  * the terms of the GNU General Public License, version 2, or at your
10  * option, any later version, incorporated herein by reference.
11  *
12  * Generic filesystem transaction handling code; part of the ext2fs
13  * journaling system.
14  *
15  * This file manages transactions (compound commits managed by the
16  * journaling code) and handles (individual atomic operations by the
17  * filesystem).
18  */
19
20 #include <linux/time.h>
21 #include <linux/fs.h>
22 #include <linux/jbd2.h>
23 #include <linux/errno.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28
29 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
30
31 /*
32  * jbd2_get_transaction: obtain a new transaction_t object.
33  *
34  * Simply allocate and initialise a new transaction.  Create it in
35  * RUNNING state and add it to the current journal (which should not
36  * have an existing running transaction: we only make a new transaction
37  * once we have started to commit the old one).
38  *
39  * Preconditions:
40  *      The journal MUST be locked.  We don't perform atomic mallocs on the
41  *      new transaction and we can't block without protecting against other
42  *      processes trying to touch the journal while it is in transition.
43  *
44  * Called under j_state_lock
45  */
46
47 static transaction_t *
48 jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
49 {
50         transaction->t_journal = journal;
51         transaction->t_state = T_RUNNING;
52         transaction->t_tid = journal->j_transaction_sequence++;
53         transaction->t_expires = jiffies + journal->j_commit_interval;
54         spin_lock_init(&transaction->t_handle_lock);
55
56         /* Set up the commit timer for the new transaction. */
57         journal->j_commit_timer.expires = round_jiffies(transaction->t_expires);
58         add_timer(&journal->j_commit_timer);
59
60         J_ASSERT(journal->j_running_transaction == NULL);
61         journal->j_running_transaction = transaction;
62         transaction->t_max_wait = 0;
63         transaction->t_start = jiffies;
64
65         return transaction;
66 }
67
68 /*
69  * Handle management.
70  *
71  * A handle_t is an object which represents a single atomic update to a
72  * filesystem, and which tracks all of the modifications which form part
73  * of that one update.
74  */
75
76 /*
77  * start_this_handle: Given a handle, deal with any locking or stalling
78  * needed to make sure that there is enough journal space for the handle
79  * to begin.  Attach the handle to a transaction and set up the
80  * transaction's buffer credits.
81  */
82
83 static int start_this_handle(journal_t *journal, handle_t *handle)
84 {
85         transaction_t *transaction;
86         int needed;
87         int nblocks = handle->h_buffer_credits;
88         transaction_t *new_transaction = NULL;
89         int ret = 0;
90         unsigned long ts = jiffies;
91
92         if (nblocks > journal->j_max_transaction_buffers) {
93                 printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
94                        current->comm, nblocks,
95                        journal->j_max_transaction_buffers);
96                 ret = -ENOSPC;
97                 goto out;
98         }
99
100 alloc_transaction:
101         if (!journal->j_running_transaction) {
102                 new_transaction = kzalloc(sizeof(*new_transaction),
103                                                 GFP_NOFS|__GFP_NOFAIL);
104                 if (!new_transaction) {
105                         ret = -ENOMEM;
106                         goto out;
107                 }
108         }
109
110         jbd_debug(3, "New handle %p going live.\n", handle);
111
112 repeat:
113
114         /*
115          * We need to hold j_state_lock until t_updates has been incremented,
116          * for proper journal barrier handling
117          */
118         spin_lock(&journal->j_state_lock);
119 repeat_locked:
120         if (is_journal_aborted(journal) ||
121             (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
122                 spin_unlock(&journal->j_state_lock);
123                 ret = -EROFS;
124                 goto out;
125         }
126
127         /* Wait on the journal's transaction barrier if necessary */
128         if (journal->j_barrier_count) {
129                 spin_unlock(&journal->j_state_lock);
130                 wait_event(journal->j_wait_transaction_locked,
131                                 journal->j_barrier_count == 0);
132                 goto repeat;
133         }
134
135         if (!journal->j_running_transaction) {
136                 if (!new_transaction) {
137                         spin_unlock(&journal->j_state_lock);
138                         goto alloc_transaction;
139                 }
140                 jbd2_get_transaction(journal, new_transaction);
141                 new_transaction = NULL;
142         }
143
144         transaction = journal->j_running_transaction;
145
146         /*
147          * If the current transaction is locked down for commit, wait for the
148          * lock to be released.
149          */
150         if (transaction->t_state == T_LOCKED) {
151                 DEFINE_WAIT(wait);
152
153                 prepare_to_wait(&journal->j_wait_transaction_locked,
154                                         &wait, TASK_UNINTERRUPTIBLE);
155                 spin_unlock(&journal->j_state_lock);
156                 schedule();
157                 finish_wait(&journal->j_wait_transaction_locked, &wait);
158                 goto repeat;
159         }
160
161         /*
162          * If there is not enough space left in the log to write all potential
163          * buffers requested by this operation, we need to stall pending a log
164          * checkpoint to free some more log space.
165          */
166         spin_lock(&transaction->t_handle_lock);
167         needed = transaction->t_outstanding_credits + nblocks;
168
169         if (needed > journal->j_max_transaction_buffers) {
170                 /*
171                  * If the current transaction is already too large, then start
172                  * to commit it: we can then go back and attach this handle to
173                  * a new transaction.
174                  */
175                 DEFINE_WAIT(wait);
176
177                 jbd_debug(2, "Handle %p starting new commit...\n", handle);
178                 spin_unlock(&transaction->t_handle_lock);
179                 prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
180                                 TASK_UNINTERRUPTIBLE);
181                 __jbd2_log_start_commit(journal, transaction->t_tid);
182                 spin_unlock(&journal->j_state_lock);
183                 schedule();
184                 finish_wait(&journal->j_wait_transaction_locked, &wait);
185                 goto repeat;
186         }
187
188         /*
189          * The commit code assumes that it can get enough log space
190          * without forcing a checkpoint.  This is *critical* for
191          * correctness: a checkpoint of a buffer which is also
192          * associated with a committing transaction creates a deadlock,
193          * so commit simply cannot force through checkpoints.
194          *
195          * We must therefore ensure the necessary space in the journal
196          * *before* starting to dirty potentially checkpointed buffers
197          * in the new transaction.
198          *
199          * The worst part is, any transaction currently committing can
200          * reduce the free space arbitrarily.  Be careful to account for
201          * those buffers when checkpointing.
202          */
203
204         /*
205          * @@@ AKPM: This seems rather over-defensive.  We're giving commit
206          * a _lot_ of headroom: 1/4 of the journal plus the size of
207          * the committing transaction.  Really, we only need to give it
208          * committing_transaction->t_outstanding_credits plus "enough" for
209          * the log control blocks.
210  * Also, this test is inconsistent with the matching one in
211          * jbd2_journal_extend().
212          */
213         if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
214                 jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
215                 spin_unlock(&transaction->t_handle_lock);
216                 __jbd2_log_wait_for_space(journal);
217                 goto repeat_locked;
218         }
219
220         /* OK, account for the buffers that this operation expects to
221          * use and add the handle to the running transaction. */
222
223         if (time_after(transaction->t_start, ts)) {
224                 ts = jbd2_time_diff(ts, transaction->t_start);
225                 if (ts > transaction->t_max_wait)
226                         transaction->t_max_wait = ts;
227         }
228
229         handle->h_transaction = transaction;
230         transaction->t_outstanding_credits += nblocks;
231         transaction->t_updates++;
232         transaction->t_handle_count++;
233         jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
234                   handle, nblocks, transaction->t_outstanding_credits,
235                   __jbd2_log_space_left(journal));
236         spin_unlock(&transaction->t_handle_lock);
237         spin_unlock(&journal->j_state_lock);
238 out:
239         if (unlikely(new_transaction))          /* It's usually NULL */
240                 kfree(new_transaction);
241         return ret;
242 }
243
244 static struct lock_class_key jbd2_handle_key;
245
246 /* Allocate a new handle.  This should probably be in a slab... */
247 static handle_t *new_handle(int nblocks)
248 {
249         handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
250         if (!handle)
251                 return NULL;
252         memset(handle, 0, sizeof(*handle));
253         handle->h_buffer_credits = nblocks;
254         handle->h_ref = 1;
255
256         lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
257                                                 &jbd2_handle_key, 0);
258
259         return handle;
260 }
261
262 /**
263  * handle_t *jbd2_journal_start() - Obtain a new handle.
264  * @journal: Journal to start transaction on.
265  * @nblocks: number of block buffers we might modify
266  *
267  * We make sure that the transaction can guarantee at least nblocks of
268  * modified buffers in the log.  We block until the log can guarantee
269  * that much space.
270  *
271  * This function is visible to journal users (like ext3fs), so is not
272  * called with the journal already locked.
273  *
274  * Return a pointer to a newly allocated handle, or an ERR_PTR() value on failure
275  */
276 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
277 {
278         handle_t *handle = journal_current_handle();
279         int err;
280
281         if (!journal)
282                 return ERR_PTR(-EROFS);
283
284         if (handle) {
285                 J_ASSERT(handle->h_transaction->t_journal == journal);
286                 handle->h_ref++;
287                 return handle;
288         }
289
290         handle = new_handle(nblocks);
291         if (!handle)
292                 return ERR_PTR(-ENOMEM);
293
294         current->journal_info = handle;
295
296         err = start_this_handle(journal, handle);
297         if (err < 0) {
298                 jbd2_free_handle(handle);
299                 current->journal_info = NULL;
300                 handle = ERR_PTR(err);
301                 goto out;
302         }
303
304         lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
305 out:
306         return handle;
307 }
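
An illustrative sketch (not part of this file): the usual calling pattern a filesystem wraps around one atomic update. The function name and the credit estimate of 8 are placeholders.

/*
 * Illustrative sketch only.  Relies on <linux/jbd2.h>; the credit
 * estimate (8) is an arbitrary placeholder for the caller's worst-case
 * count of modified buffers.
 */
static int example_update(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle = jbd2_journal_start(journal, 8);
        int err, stop_err;

        if (IS_ERR(handle))
                return PTR_ERR(handle);
        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify bh->b_data under the handle ... */
                err = jbd2_journal_dirty_metadata(handle, bh);
        }
        stop_err = jbd2_journal_stop(handle);   /* may return -EIO */
        return err ? err : stop_err;
}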
308
309 /**
310  * int jbd2_journal_extend() - extend buffer credits.
311  * @handle:  handle to 'extend'
312  * @nblocks: nr blocks to try to extend by.
313  *
314  * Some transactions, such as large extends and truncates, can be done
315  * atomically all at once or in several stages.  The operation requests
316  * a credit for a number of buffer modifications in advance, but can
317  * extend its credit if it needs more.
318  *
319  * jbd2_journal_extend tries to give the running handle more buffer credits.
320  * It does not guarantee that the extra credits will be granted - this is best-effort only.
321  * The calling process MUST be able to deal cleanly with a failure to
322  * extend here.
323  *
324  * Return 0 on success, non-zero on failure.
325  *
326  * return code < 0 implies an error
327  * return code > 0 implies normal transaction-full status.
328  */
329 int jbd2_journal_extend(handle_t *handle, int nblocks)
330 {
331         transaction_t *transaction = handle->h_transaction;
332         journal_t *journal = transaction->t_journal;
333         int result;
334         int wanted;
335
336         result = -EIO;
337         if (is_handle_aborted(handle))
338                 goto out;
339
340         result = 1;
341
342         spin_lock(&journal->j_state_lock);
343
344         /* Don't extend a locked-down transaction! */
345         if (handle->h_transaction->t_state != T_RUNNING) {
346                 jbd_debug(3, "denied handle %p %d blocks: "
347                           "transaction not running\n", handle, nblocks);
348                 goto error_out;
349         }
350
351         spin_lock(&transaction->t_handle_lock);
352         wanted = transaction->t_outstanding_credits + nblocks;
353
354         if (wanted > journal->j_max_transaction_buffers) {
355                 jbd_debug(3, "denied handle %p %d blocks: "
356                           "transaction too large\n", handle, nblocks);
357                 goto unlock;
358         }
359
360         if (wanted > __jbd2_log_space_left(journal)) {
361                 jbd_debug(3, "denied handle %p %d blocks: "
362                           "insufficient log space\n", handle, nblocks);
363                 goto unlock;
364         }
365
366         handle->h_buffer_credits += nblocks;
367         transaction->t_outstanding_credits += nblocks;
368         result = 0;
369
370         jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
371 unlock:
372         spin_unlock(&transaction->t_handle_lock);
373 error_out:
374         spin_unlock(&journal->j_state_lock);
375 out:
376         return result;
377 }
378
379
380 /**
381  * int jbd2_journal_restart() - restart a handle.
382  * @handle:  handle to restart
383  * @nblocks: nr credits requested
384  *
385  * Restart a handle for a multi-transaction filesystem
386  * operation.
387  *
388  * If the jbd2_journal_extend() call above fails to grant new buffer credits
389  * to a running handle, a call to jbd2_journal_restart will commit the
390  * handle's transaction so far and reattach the handle to a new
391  * transaction capable of guaranteeing the requested number of
392  * credits.
393  */
394
395 int jbd2_journal_restart(handle_t *handle, int nblocks)
396 {
397         transaction_t *transaction = handle->h_transaction;
398         journal_t *journal = transaction->t_journal;
399         int ret;
400
401         /* If we've had an abort of any type, don't even think about
402          * actually doing the restart! */
403         if (is_handle_aborted(handle))
404                 return 0;
405
406         /*
407          * First unlink the handle from its current transaction, and start the
408          * commit on that.
409          */
410         J_ASSERT(transaction->t_updates > 0);
411         J_ASSERT(journal_current_handle() == handle);
412
413         spin_lock(&journal->j_state_lock);
414         spin_lock(&transaction->t_handle_lock);
415         transaction->t_outstanding_credits -= handle->h_buffer_credits;
416         transaction->t_updates--;
417
418         if (!transaction->t_updates)
419                 wake_up(&journal->j_wait_updates);
420         spin_unlock(&transaction->t_handle_lock);
421
422         jbd_debug(2, "restarting handle %p\n", handle);
423         __jbd2_log_start_commit(journal, transaction->t_tid);
424         spin_unlock(&journal->j_state_lock);
425
426         handle->h_buffer_credits = nblocks;
427         ret = start_this_handle(journal, handle);
428         return ret;
429 }
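
A sketch of the extend-or-restart fallback described by the two kernel-doc blocks above; "needed" is a hypothetical credit count chosen by the caller.

/*
 * Illustrative sketch only.  jbd2_journal_extend() returning > 0 means
 * "transaction full"; the caller may then restart the handle against a
 * fresh transaction instead.
 */
static int example_get_more_credits(handle_t *handle, int needed)
{
        int err = jbd2_journal_extend(handle, needed);

        if (err > 0)
                err = jbd2_journal_restart(handle, needed);
        return err;     /* 0 on success, < 0 on error */
}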
430
431
432 /**
433  * void jbd2_journal_lock_updates () - establish a transaction barrier.
434  * @journal:  Journal to establish a barrier on.
435  *
436  * This locks out any further updates from being started, and blocks
437  * until all existing updates have completed, returning only once the
438  * journal is in a quiescent state with no updates running.
439  *
440  * The journal lock should not be held on entry.
441  */
442 void jbd2_journal_lock_updates(journal_t *journal)
443 {
444         DEFINE_WAIT(wait);
445
446         spin_lock(&journal->j_state_lock);
447         ++journal->j_barrier_count;
448
449         /* Wait until there are no running updates */
450         while (1) {
451                 transaction_t *transaction = journal->j_running_transaction;
452
453                 if (!transaction)
454                         break;
455
456                 spin_lock(&transaction->t_handle_lock);
457                 if (!transaction->t_updates) {
458                         spin_unlock(&transaction->t_handle_lock);
459                         break;
460                 }
461                 prepare_to_wait(&journal->j_wait_updates, &wait,
462                                 TASK_UNINTERRUPTIBLE);
463                 spin_unlock(&transaction->t_handle_lock);
464                 spin_unlock(&journal->j_state_lock);
465                 schedule();
466                 finish_wait(&journal->j_wait_updates, &wait);
467                 spin_lock(&journal->j_state_lock);
468         }
469         spin_unlock(&journal->j_state_lock);
470
471         /*
472          * We have now established a barrier against other normal updates, but
473          * we also need to barrier against other jbd2_journal_lock_updates() calls
474          * to make sure that we serialise special journal-locked operations
475          * too.
476          */
477         mutex_lock(&journal->j_barrier);
478 }
479
480 /**
481  * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
482  * @journal:  Journal to release the barrier on.
483  *
484  * Release a transaction barrier obtained with jbd2_journal_lock_updates().
485  *
486  * Should be called without the journal lock held.
487  */
488 void jbd2_journal_unlock_updates (journal_t *journal)
489 {
490         J_ASSERT(journal->j_barrier_count != 0);
491
492         mutex_unlock(&journal->j_barrier);
493         spin_lock(&journal->j_state_lock);
494         --journal->j_barrier_count;
495         spin_unlock(&journal->j_state_lock);
496         wake_up(&journal->j_wait_transaction_locked);
497 }
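
A minimal sketch of the barrier pair in use; the callback is a hypothetical stand-in for an operation that must see a quiescent journal.

/*
 * Illustrative sketch only: no handle can run between the two calls,
 * and nested jbd2_journal_lock_updates() callers serialise on
 * j_barrier.
 */
static void example_quiesce(journal_t *journal, void (*op)(journal_t *))
{
        jbd2_journal_lock_updates(journal);
        op(journal);
        jbd2_journal_unlock_updates(journal);
}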
498
499 /*
500  * Report any unexpected dirty buffers which turn up.  Normally those
501  * indicate an error, but they can occur if the user is running (say)
502  * tune2fs to modify the live filesystem, so we need the option of
503  * continuing as gracefully as possible.
504  *
505  * The caller should already hold the journal lock and
506  * j_list_lock spinlock: most callers will need those anyway
507  * in order to probe the buffer's journaling state safely.
508  */
509 static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
510 {
511         int jlist;
512
513         /* If this buffer is one which might reasonably be dirty
514          * --- ie. data, or not part of this journal --- then
515          * we're OK to leave it alone, but otherwise we need to
516          * move the dirty bit to the journal's own internal
517          * JBDDirty bit. */
518         jlist = jh->b_jlist;
519
520         if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
521             jlist == BJ_Shadow || jlist == BJ_Forget) {
522                 struct buffer_head *bh = jh2bh(jh);
523
524                 if (test_clear_buffer_dirty(bh))
525                         set_buffer_jbddirty(bh);
526         }
527 }
528
529 /*
530  * If the buffer is already part of the current transaction, then there
531  * is nothing we need to do.  If it is already part of a prior
532  * transaction which we are still committing to disk, then we need to
533  * make sure that we do not overwrite the old copy: we do copy-out to
534  * preserve the copy going to disk.  We also account the buffer against
535  * the handle's metadata buffer credits (unless the buffer is already
536  * part of the transaction, that is).
537  *
538  */
539 static int
540 do_get_write_access(handle_t *handle, struct journal_head *jh,
541                         int force_copy)
542 {
543         struct buffer_head *bh;
544         transaction_t *transaction;
545         journal_t *journal;
546         int error;
547         char *frozen_buffer = NULL;
548         int need_copy = 0;
549
550         if (is_handle_aborted(handle))
551                 return -EROFS;
552
553         transaction = handle->h_transaction;
554         journal = transaction->t_journal;
555
556         jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
557
558         JBUFFER_TRACE(jh, "entry");
559 repeat:
560         bh = jh2bh(jh);
561
562         /* @@@ Need to check for errors here at some point. */
563
564         lock_buffer(bh);
565         jbd_lock_bh_state(bh);
566
567         /* We now hold the buffer lock so it is safe to query the buffer
568          * state.  Is the buffer dirty?
569          *
570          * If so, there are two possibilities.  The buffer may be
571          * non-journaled, and undergoing a quite legitimate writeback.
572          * Otherwise, it is journaled, and we don't expect dirty buffers
573          * in that state (the buffers should be marked JBD_Dirty
574          * instead.)  So either the IO is being done under our own
575          * control and this is a bug, or it's a third party IO such as
576          * dump(8) (which may leave the buffer scheduled for read ---
577          * ie. locked but not dirty) or tune2fs (which may actually have
578          * the buffer dirtied, ugh.)  */
579
580         if (buffer_dirty(bh)) {
581                 /*
582                  * First question: is this buffer already part of the current
583                  * transaction or the existing committing transaction?
584                  */
585                 if (jh->b_transaction) {
586                         J_ASSERT_JH(jh,
587                                 jh->b_transaction == transaction ||
588                                 jh->b_transaction ==
589                                         journal->j_committing_transaction);
590                         if (jh->b_next_transaction)
591                                 J_ASSERT_JH(jh, jh->b_next_transaction ==
592                                                         transaction);
593                 }
594                 /*
595                  * In any case we need to clean the dirty flag and we must
596                  * do it under the buffer lock to be sure we don't race
597                  * with running write-out.
598                  */
599                 JBUFFER_TRACE(jh, "Unexpected dirty buffer");
600                 jbd_unexpected_dirty_buffer(jh);
601         }
602
603         unlock_buffer(bh);
604
605         error = -EROFS;
606         if (is_handle_aborted(handle)) {
607                 jbd_unlock_bh_state(bh);
608                 goto out;
609         }
610         error = 0;
611
612         /*
613          * The buffer is already part of this transaction if b_transaction or
614          * b_next_transaction points to it
615          */
616         if (jh->b_transaction == transaction ||
617             jh->b_next_transaction == transaction)
618                 goto done;
619
620         /*
621          * this is the first time this transaction is touching this buffer,
622          * reset the modified flag
623          */
624         jh->b_modified = 0;
625
626         /*
627          * If there is already a copy-out version of this buffer, then we don't
628          * need to make another one
629          */
630         if (jh->b_frozen_data) {
631                 JBUFFER_TRACE(jh, "has frozen data");
632                 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
633                 jh->b_next_transaction = transaction;
634                 goto done;
635         }
636
637         /* Is there data here we need to preserve? */
638
639         if (jh->b_transaction && jh->b_transaction != transaction) {
640                 JBUFFER_TRACE(jh, "owned by older transaction");
641                 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
642                 J_ASSERT_JH(jh, jh->b_transaction ==
643                                         journal->j_committing_transaction);
644
645                 /* There is one case we have to be very careful about.
646                  * If the committing transaction is currently writing
647                  * this buffer out to disk and has NOT made a copy-out,
648                  * then we cannot modify the buffer contents at all
649                  * right now.  The essence of copy-out is that it is the
650                  * extra copy, not the primary copy, which gets
651                  * journaled.  If the primary copy is already going to
652                  * disk then we cannot do copy-out here. */
653
654                 if (jh->b_jlist == BJ_Shadow) {
655                         DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
656                         wait_queue_head_t *wqh;
657
658                         wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
659
660                         JBUFFER_TRACE(jh, "on shadow: sleep");
661                         jbd_unlock_bh_state(bh);
662                         /* commit wakes up all shadow buffers after IO */
663                         for ( ; ; ) {
664                                 prepare_to_wait(wqh, &wait.wait,
665                                                 TASK_UNINTERRUPTIBLE);
666                                 if (jh->b_jlist != BJ_Shadow)
667                                         break;
668                                 schedule();
669                         }
670                         finish_wait(wqh, &wait.wait);
671                         goto repeat;
672                 }
673
674                 /* Only do the copy if the currently-owning transaction
675                  * still needs it.  If it is on the Forget list, the
676                  * committing transaction is past that stage.  The
677                  * buffer had better remain locked during the kmalloc,
678                  * but that should be true --- we hold the journal lock
679                  * still and the buffer is already on the BUF_JOURNAL
680                  * list so won't be flushed.
681                  *
682                  * Subtle point, though: if this is a get_undo_access,
683                  * then we will be relying on the frozen_data to contain
684                  * the new value of the committed_data record after the
685                  * transaction, so we HAVE to force the frozen_data copy
686                  * in that case. */
687
688                 if (jh->b_jlist != BJ_Forget || force_copy) {
689                         JBUFFER_TRACE(jh, "generate frozen data");
690                         if (!frozen_buffer) {
691                                 JBUFFER_TRACE(jh, "allocate memory for buffer");
692                                 jbd_unlock_bh_state(bh);
693                                 frozen_buffer =
694                                         jbd2_alloc(jh2bh(jh)->b_size,
695                                                          GFP_NOFS);
696                                 if (!frozen_buffer) {
697                                         printk(KERN_EMERG
698                                                "%s: OOM for frozen_buffer\n",
699                                                __FUNCTION__);
700                                         JBUFFER_TRACE(jh, "oom!");
701                                         error = -ENOMEM;
702                                         jbd_lock_bh_state(bh);
703                                         goto done;
704                                 }
705                                 goto repeat;
706                         }
707                         jh->b_frozen_data = frozen_buffer;
708                         frozen_buffer = NULL;
709                         need_copy = 1;
710                 }
711                 jh->b_next_transaction = transaction;
712         }
713
714
715         /*
716          * Finally, if the buffer is not journaled right now, we need to make
717          * sure it doesn't get written to disk before the caller actually
718          * commits the new data
719          */
720         if (!jh->b_transaction) {
721                 JBUFFER_TRACE(jh, "no transaction");
722                 J_ASSERT_JH(jh, !jh->b_next_transaction);
723                 jh->b_transaction = transaction;
724                 JBUFFER_TRACE(jh, "file as BJ_Reserved");
725                 spin_lock(&journal->j_list_lock);
726                 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
727                 spin_unlock(&journal->j_list_lock);
728         }
729
730 done:
731         if (need_copy) {
732                 struct page *page;
733                 int offset;
734                 char *source;
735
736                 J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
737                             "Possible IO failure.\n");
738                 page = jh2bh(jh)->b_page;
739                 offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
740                 source = kmap_atomic(page, KM_USER0);
741                 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
742                 kunmap_atomic(source, KM_USER0);
743         }
744         jbd_unlock_bh_state(bh);
745
746         /*
747          * If we are about to journal a buffer, then any revoke pending on it is
748          * no longer valid
749          */
750         jbd2_journal_cancel_revoke(handle, jh);
751
752 out:
753         if (unlikely(frozen_buffer))    /* It's usually NULL */
754                 jbd2_free(frozen_buffer, bh->b_size);
755
756         JBUFFER_TRACE(jh, "exit");
757         return error;
758 }
759
760 /**
761  * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
762  * @handle: transaction to add buffer modifications to
763  * @bh:     bh to be used for metadata writes
765  *
766  * Returns an error code or 0 on success.
767  *
768  * In full data journalling mode the buffer may be of type BJ_AsyncData,
769  * because we're write()ing a buffer which is also part of a shared mapping.
770  */
771
772 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
773 {
774         struct journal_head *jh = jbd2_journal_add_journal_head(bh);
775         int rc;
776
777         /* We do not want to get caught playing with fields which the
778          * log thread also manipulates.  Make sure that the buffer
779          * completes any outstanding IO before proceeding. */
780         rc = do_get_write_access(handle, jh, 0);
781         jbd2_journal_put_journal_head(jh);
782         return rc;
783 }
784
785
786 /*
787  * When the user wants to journal a newly created buffer_head
788  * (ie. getblk() returned a new buffer and we are going to populate it
789  * manually rather than reading off disk), then we need to keep the
790  * buffer_head locked until it has been completely filled with new
791  * data.  In this case, we should be able to make the assertion that
792  * the bh is not already part of an existing transaction.
793  *
794  * The buffer should already be locked by the caller by this point.
795  * There is no lock ranking violation: it was a newly created,
796  * unlocked buffer beforehand. */
797
798 /**
799  * int jbd2_journal_get_create_access () - notify intent to use newly created bh
800  * @handle: transaction to add the new buffer to
801  * @bh: new buffer.
802  *
803  * Call this if you create a new bh.
804  */
805 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
806 {
807         transaction_t *transaction = handle->h_transaction;
808         journal_t *journal = transaction->t_journal;
809         struct journal_head *jh = jbd2_journal_add_journal_head(bh);
810         int err;
811
812         jbd_debug(5, "journal_head %p\n", jh);
813         err = -EROFS;
814         if (is_handle_aborted(handle))
815                 goto out;
816         err = 0;
817
818         JBUFFER_TRACE(jh, "entry");
819         /*
820          * The buffer may already belong to this transaction due to pre-zeroing
821          * in the filesystem's new_block code.  It may also be on the previous,
822          * committing transaction's lists, but it HAS to be in Forget state in
823          * that case: the transaction must have deleted the buffer for it to be
824          * reused here.
825          */
826         jbd_lock_bh_state(bh);
827         spin_lock(&journal->j_list_lock);
828         J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
829                 jh->b_transaction == NULL ||
830                 (jh->b_transaction == journal->j_committing_transaction &&
831                           jh->b_jlist == BJ_Forget)));
832
833         J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
834         J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
835
836         if (jh->b_transaction == NULL) {
837                 jh->b_transaction = transaction;
838
839                 /* first access by this transaction */
840                 jh->b_modified = 0;
841
842                 JBUFFER_TRACE(jh, "file as BJ_Reserved");
843                 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
844         } else if (jh->b_transaction == journal->j_committing_transaction) {
845                 /* first access by this transaction */
846                 jh->b_modified = 0;
847
848                 JBUFFER_TRACE(jh, "set next transaction");
849                 jh->b_next_transaction = transaction;
850         }
851         spin_unlock(&journal->j_list_lock);
852         jbd_unlock_bh_state(bh);
853
854         /*
855          * akpm: I added this.  ext3_alloc_branch can pick up new indirect
856          * blocks which contain freed but then revoked metadata.  We need
857          * to cancel the revoke in case we end up freeing it yet again
858          * and the reallocating as data - this would cause a second revoke,
859          * which hits an assertion error.
860          */
861         JBUFFER_TRACE(jh, "cancelling revoke");
862         jbd2_journal_cancel_revoke(handle, jh);
863         jbd2_journal_put_journal_head(jh);
864 out:
865         return err;
866 }
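
A sketch of the intended create-access sequence, assuming a freshly allocated block obtained with sb_getblk(): lock the buffer (the assertion above requires it), declare create access, fill the contents, then journal it as dirty metadata.

/*
 * Illustrative sketch only.  The zero-fill stands in for whatever
 * contents the filesystem writes into the brand-new block.
 */
static int example_init_new_block(handle_t *handle, struct buffer_head *bh)
{
        int err;

        lock_buffer(bh);
        err = jbd2_journal_get_create_access(handle, bh);
        if (!err) {
                memset(bh->b_data, 0, bh->b_size);
                set_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        if (!err)
                err = jbd2_journal_dirty_metadata(handle, bh);
        return err;
}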
867
868 /**
869  * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
870  *     non-rewindable consequences
871  * @handle: transaction
872  * @bh: buffer to undo
874  *
875  * Sometimes there is a need to distinguish between metadata which has
876  * been committed to disk and that which has not.  The ext3fs code uses
877  * this for freeing and allocating space, we have to make sure that we
878  * do not reuse freed space until the deallocation has been committed,
879  * since if we overwrote that space we would make the delete
880  * un-rewindable in case of a crash.
881  *
882  * To deal with that, jbd2_journal_get_undo_access requests write access to a
883  * buffer for parts of non-rewindable operations such as delete
884  * operations on the bitmaps.  The journaling code must keep a copy of
885  * the buffer's contents prior to the undo_access call until such time
886  * as we know that the buffer has definitely been committed to disk.
887  *
888  * We never need to know which transaction the committed data is part
889  * of, buffers touched here are guaranteed to be dirtied later and so
890  * will be committed to a new transaction in due course, at which point
891  * we can discard the old committed data pointer.
892  *
893  * Returns error number or 0 on success.
894  */
895 int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
896 {
897         int err;
898         struct journal_head *jh = jbd2_journal_add_journal_head(bh);
899         char *committed_data = NULL;
900
901         JBUFFER_TRACE(jh, "entry");
902
903         /*
904          * Do this first --- it can drop the journal lock, so we want to
905          * make sure that obtaining the committed_data is done
906          * atomically wrt. completion of any outstanding commits.
907          */
908         err = do_get_write_access(handle, jh, 1);
909         if (err)
910                 goto out;
911
912 repeat:
913         if (!jh->b_committed_data) {
914                 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
915                 if (!committed_data) {
916                         printk(KERN_EMERG "%s: No memory for committed data\n",
917                                 __FUNCTION__);
918                         err = -ENOMEM;
919                         goto out;
920                 }
921         }
922
923         jbd_lock_bh_state(bh);
924         if (!jh->b_committed_data) {
925                 /* Copy out the current buffer contents into the
926                  * preserved, committed copy. */
927                 JBUFFER_TRACE(jh, "generate b_committed data");
928                 if (!committed_data) {
929                         jbd_unlock_bh_state(bh);
930                         goto repeat;
931                 }
932
933                 jh->b_committed_data = committed_data;
934                 committed_data = NULL;
935                 memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
936         }
937         jbd_unlock_bh_state(bh);
938 out:
939         jbd2_journal_put_journal_head(jh);
940         if (unlikely(committed_data))
941                 jbd2_free(committed_data, bh->b_size);
942         return err;
943 }
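
A sketch of undo access on a bitmap-style buffer, per the comment above: the preserved committed copy lets the filesystem avoid reusing blocks whose freeing has not yet committed. The bit-clearing line is illustrative.

/*
 * Illustrative sketch only: freeing one block in an allocation bitmap.
 */
static int example_clear_bitmap_bit(handle_t *handle,
                                    struct buffer_head *bitmap_bh, int bit)
{
        int err = jbd2_journal_get_undo_access(handle, bitmap_bh);

        if (err)
                return err;
        __clear_bit(bit, (unsigned long *)bitmap_bh->b_data);
        return jbd2_journal_dirty_metadata(handle, bitmap_bh);
}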
944
945 /**
946  * int jbd2_journal_dirty_data() -  mark a buffer as containing dirty data which
947  *                             needs to be flushed before we can commit the
948  *                             current transaction.
949  * @handle: transaction
950  * @bh: bufferhead to mark
951  *
952  * The buffer is placed on the transaction's data list and is marked as
953  * belonging to the transaction.
954  *
955  * Returns error number or 0 on success.
956  *
957  * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
958  * by kswapd.
959  */
960 int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
961 {
962         journal_t *journal = handle->h_transaction->t_journal;
963         int need_brelse = 0;
964         struct journal_head *jh;
965
966         if (is_handle_aborted(handle))
967                 return 0;
968
969         jh = jbd2_journal_add_journal_head(bh);
970         JBUFFER_TRACE(jh, "entry");
971
972         /*
973          * The buffer could *already* be dirty.  Writeout can start
974          * at any time.
975          */
976         jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
977
978         /*
979          * What if the buffer is already part of a running transaction?
980          *
981          * There are two cases:
982          * 1) It is part of the current running transaction.  Refile it,
983          *    just in case we have allocated it as metadata, deallocated
984          *    it, then reallocated it as data.
985          * 2) It is part of the previous, still-committing transaction.
986          *    If all we want to do is to guarantee that the buffer will be
987          *    written to disk before this new transaction commits, then
988          *    being sure that the *previous* transaction has this same
989          *    property is sufficient for us!  Just leave it on its old
990          *    transaction.
991          *
992          * In case (2), the buffer must not already exist as metadata
993          * --- that would violate write ordering (a transaction is free
994          * to write its data at any point, even before the previous
995          * committing transaction has committed).  The caller must
996          * never, ever allow this to happen: there's nothing we can do
997          * about it in this layer.
998          */
999         jbd_lock_bh_state(bh);
1000         spin_lock(&journal->j_list_lock);
1001
1002         /* Now that we have bh_state locked, are we really still mapped? */
1003         if (!buffer_mapped(bh)) {
1004                 JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
1005                 goto no_journal;
1006         }
1007
1008         if (jh->b_transaction) {
1009                 JBUFFER_TRACE(jh, "has transaction");
1010                 if (jh->b_transaction != handle->h_transaction) {
1011                         JBUFFER_TRACE(jh, "belongs to older transaction");
1012                         J_ASSERT_JH(jh, jh->b_transaction ==
1013                                         journal->j_committing_transaction);
1014
1015                         /* @@@ IS THIS TRUE  ? */
1016                         /*
1017                          * Not any more.  Scenario: someone does a write()
1018                          * in data=journal mode.  The buffer's transaction has
1019                          * moved into commit.  Then someone does another
1020                          * write() to the file.  We do the frozen data copyout
1021                          * and set b_next_transaction to point to j_running_t.
1022                          * And while we're in that state, someone does a
1023                          * writepage() in an attempt to pageout the same area
1024                          * of the file via a shared mapping.  At present that
1025                          * calls jbd2_journal_dirty_data(), and we get right here.
1026                          * It may be too late to journal the data.  Simply
1027                          * falling through to the next test will suffice: the
1028          * data will be dirty and will be checkpointed.  The
1029                          * ordering comments in the next comment block still
1030                          * apply.
1031                          */
1032                         //J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
1033
1034                         /*
1035                          * If we're journalling data, and this buffer was
1036                          * subject to a write(), it could be metadata, forget
1037                          * or shadow against the committing transaction.  Now,
1038                          * someone has dirtied the same darn page via a mapping
1039                          * and it is being writepage()'d.
1040                          * We *could* just steal the page from commit, with some
1041                          * fancy locking there.  Instead, we just skip it -
1042                          * don't tie the page's buffers to the new transaction
1043                          * at all.
1044                          * Implication: if we crash before the writepage() data
1045                          * is written into the filesystem, recovery will replay
1046                          * the write() data.
1047                          */
1048                         if (jh->b_jlist != BJ_None &&
1049                                         jh->b_jlist != BJ_SyncData &&
1050                                         jh->b_jlist != BJ_Locked) {
1051                                 JBUFFER_TRACE(jh, "Not stealing");
1052                                 goto no_journal;
1053                         }
1054
1055                         /*
1056                          * This buffer may be undergoing writeout in commit.  We
1057                          * can't return from here and let the caller dirty it
1058                          * again because that can cause the write-out loop in
1059                          * commit to never terminate.
1060                          */
1061                         if (buffer_dirty(bh)) {
1062                                 get_bh(bh);
1063                                 spin_unlock(&journal->j_list_lock);
1064                                 jbd_unlock_bh_state(bh);
1065                                 need_brelse = 1;
1066                                 sync_dirty_buffer(bh);
1067                                 jbd_lock_bh_state(bh);
1068                                 spin_lock(&journal->j_list_lock);
1069                                 /* Since we dropped the lock... */
1070                                 if (!buffer_mapped(bh)) {
1071                                         JBUFFER_TRACE(jh, "buffer got unmapped");
1072                                         goto no_journal;
1073                                 }
1074                                 /* The buffer may become locked again at any
1075                                    time if it is redirtied */
1076                         }
1077
1078                         /* journal_clean_data_list() may have got there first */
1079                         if (jh->b_transaction != NULL) {
1080                                 JBUFFER_TRACE(jh, "unfile from commit");
1081                                 __jbd2_journal_temp_unlink_buffer(jh);
1082                                 /* It still points to the committing
1083                                  * transaction; move it to this one so
1084                                  * that the refile assert checks are
1085                                  * happy. */
1086                                 jh->b_transaction = handle->h_transaction;
1087                         }
1088                         /* The buffer will be refiled below */
1089
1090                 }
1091                 /*
1092                  * Special case --- the buffer might actually have been
1093                  * allocated and then immediately deallocated in the previous,
1094                  * committing transaction, so might still be left on that
1095                  * transaction's metadata lists.
1096                  */
1097                 if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
1098                         JBUFFER_TRACE(jh, "not on correct data list: unfile");
1099                         J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
1100                         __jbd2_journal_temp_unlink_buffer(jh);
1101                         jh->b_transaction = handle->h_transaction;
1102                         JBUFFER_TRACE(jh, "file as data");
1103                         __jbd2_journal_file_buffer(jh, handle->h_transaction,
1104                                                 BJ_SyncData);
1105                 }
1106         } else {
1107                 JBUFFER_TRACE(jh, "not on a transaction");
1108                 __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
1109         }
1110 no_journal:
1111         spin_unlock(&journal->j_list_lock);
1112         jbd_unlock_bh_state(bh);
1113         if (need_brelse) {
1114                 BUFFER_TRACE(bh, "brelse");
1115                 __brelse(bh);
1116         }
1117         JBUFFER_TRACE(jh, "exit");
1118         jbd2_journal_put_journal_head(jh);
1119         return 0;
1120 }
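
A sketch of the writepage-style caller mentioned above, assuming the page already has buffers: each data buffer is filed against the running handle so commit flushes the data first.

/*
 * Illustrative sketch only: walk a page's buffer ring and file each
 * buffer as ordered data.
 */
static int example_order_page_data(handle_t *handle, struct page *page)
{
        struct buffer_head *bh, *head;
        int err = 0;

        bh = head = page_buffers(page);
        do {
                err = jbd2_journal_dirty_data(handle, bh);
                if (err)
                        break;
                bh = bh->b_this_page;
        } while (bh != head);
        return err;
}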
1121
1122 /**
1123  * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
1124  * @handle: transaction to add buffer to.
1125  * @bh: buffer to mark
1126  *
1127  * mark dirty metadata which needs to be journaled as part of the current
1128  * transaction.
1129  *
1130  * The buffer is placed on the transaction's metadata list and is marked
1131  * as belonging to the transaction.
1132  *
1133  * Returns error number or 0 on success.
1134  *
1135  * Special care needs to be taken if the buffer already belongs to the
1136  * current committing transaction (in which case we should have frozen
1137  * data present for that commit).  In that case, we don't relink the
1138  * buffer: that only gets done when the old transaction finally
1139  * completes its commit.
1140  */
1141 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1142 {
1143         transaction_t *transaction = handle->h_transaction;
1144         journal_t *journal = transaction->t_journal;
1145         struct journal_head *jh = bh2jh(bh);
1146
1147         jbd_debug(5, "journal_head %p\n", jh);
1148         JBUFFER_TRACE(jh, "entry");
1149         if (is_handle_aborted(handle))
1150                 goto out;
1151
1152         jbd_lock_bh_state(bh);
1153
1154         if (jh->b_modified == 0) {
1155                 /*
1156          * This buffer has been modified and is becoming part of
1157          * the transaction.  This accounting needs to be done
1158          * only once per transaction -bzzz
1159                  */
1160                 jh->b_modified = 1;
1161                 J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
1162                 handle->h_buffer_credits--;
1163         }
1164
1165         /*
1166          * fastpath, to avoid expensive locking.  If this buffer is already
1167          * on the running transaction's metadata list there is nothing to do.
1168          * Nobody can take it off again because there is a handle open.
1169          * I _think_ we're OK here with SMP barriers - a mistaken decision will
1170          * result in this test being false, so we go in and take the locks.
1171          */
1172         if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
1173                 JBUFFER_TRACE(jh, "fastpath");
1174                 J_ASSERT_JH(jh, jh->b_transaction ==
1175                                         journal->j_running_transaction);
1176                 goto out_unlock_bh;
1177         }
1178
1179         set_buffer_jbddirty(bh);
1180
1181         /*
1182          * Metadata already on the current transaction list doesn't
1183          * need to be filed.  Metadata on another transaction's list must
1184          * be committing, and will be refiled once the commit completes:
1185          * leave it alone for now.
1186          */
1187         if (jh->b_transaction != transaction) {
1188                 JBUFFER_TRACE(jh, "already on other transaction");
1189                 J_ASSERT_JH(jh, jh->b_transaction ==
1190                                         journal->j_committing_transaction);
1191                 J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
1192                 /* And this case is illegal: we can't reuse another
1193                  * transaction's data buffer, ever. */
1194                 goto out_unlock_bh;
1195         }
1196
1197         /* That test should have eliminated the following case: */
1198         J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
1199
1200         JBUFFER_TRACE(jh, "file as BJ_Metadata");
1201         spin_lock(&journal->j_list_lock);
1202         __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
1203         spin_unlock(&journal->j_list_lock);
1204 out_unlock_bh:
1205         jbd_unlock_bh_state(bh);
1206 out:
1207         JBUFFER_TRACE(jh, "exit");
1208         return 0;
1209 }
1210
1211 /*
1212  * jbd2_journal_release_buffer: undo a get_write_access without any buffer
1213  * updates, if the update decided in the end that it didn't need access.
1214  *
1215  */
1216 void
1217 jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
1218 {
1219         BUFFER_TRACE(bh, "entry");
1220 }
1221
1222 /**
1223  * int jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1224  * @handle: transaction handle
1225  * @bh:     bh to 'forget'
1226  *
1227  * We can only do the bforget if there are no commits pending against the
1228  * buffer.  If the buffer is dirty in the current running transaction we
1229  * can safely unlink it.
1230  *
1231  * bh may not be a journalled buffer at all - it may be a non-JBD
1232  * buffer which came off the hashtable.  Check for this.
1233  *
1234  * Decrements bh->b_count by one.
1235  *
1236  * Allow this call even if the handle has aborted --- it may be part of
1237  * the caller's cleanup after an abort.
1238  */
1239 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1240 {
1241         transaction_t *transaction = handle->h_transaction;
1242         journal_t *journal = transaction->t_journal;
1243         struct journal_head *jh;
1244         int drop_reserve = 0;
1245         int err = 0;
1246
1247         BUFFER_TRACE(bh, "entry");
1248
1249         jbd_lock_bh_state(bh);
1250         spin_lock(&journal->j_list_lock);
1251
1252         if (!buffer_jbd(bh))
1253                 goto not_jbd;
1254         jh = bh2jh(bh);
1255
1256         /* Critical error: attempting to delete a bitmap buffer, maybe?
1257          * Don't do any jbd operations, and return an error. */
1258         if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1259                          "inconsistent data on disk")) {
1260                 err = -EIO;
1261                 goto not_jbd;
1262         }
1263
1264         /*
1265          * The buffer's going from the transaction, we must drop
1266          * all references -bzzz
1267          */
1268         jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			jbd2_journal_remove_journal_head(bh);
			__brelse(bh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;
			drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}
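
/*
 * Illustrative sketch of a typical jbd2_journal_forget() caller: a
 * filesystem dropping a metadata block inside a handle.  The function
 * below is hypothetical (hence the #if 0); a real filesystem would
 * usually also revoke journalled metadata via jbd2_journal_revoke() so
 * that log replay cannot resurrect the freed block.
 */
#if 0
static int example_free_metadata_block(handle_t *handle,
				       struct buffer_head *bh)
{
	/* Stop journaling the buffer; if this transaction reserved a
	 * credit for it, jbd2_journal_forget() gives the credit back. */
	return jbd2_journal_forget(handle, bh);
}
#endif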

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int old_handle_count, err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		journal->j_last_sync_writer = pid;
		do {
			old_handle_count = transaction->t_handle_count;
			schedule_timeout_uninterruptible(1);
		} while (old_handle_count != transaction->t_handle_count);
	}

	current->journal_info = NULL;
	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		spin_unlock(&transaction->t_handle_lock);
		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		__jbd2_log_start_commit(journal, tid);
		spin_unlock(&journal->j_state_lock);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			err = jbd2_log_wait_commit(journal, tid);
	} else {
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
	}

	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);

	jbd2_free_handle(handle);
	return err;
}
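
/*
 * A minimal sketch of the handle life cycle that jbd2_journal_stop()
 * completes (illustrative only, kept under #if 0; the caller and the
 * single-credit reservation are assumptions, but the jbd2 calls are the
 * standard interface):
 */
#if 0
static int example_update_one_block(journal_t *journal,
				    struct buffer_head *bh)
{
	handle_t *handle;
	int err, err2;

	/* Reserve one buffer credit for the single block we will dirty. */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	/* Return unused credits; this may also kick off a commit. */
	err2 = jbd2_journal_stop(handle);
	return err ? err : err2;
}
#endif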

/**
 * int jbd2_journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int jbd2_journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		handle->h_sync = 1;
		ret = jbd2_journal_stop(handle);
	}
	return ret;
}

/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
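
/*
 * The b_tnext/b_tprev links above form a circular, doubly-linked list,
 * so walking a whole transaction list looks like the sketch below
 * (illustrative only, kept under #if 0; assumes the caller holds
 * j_list_lock, as the real list users in this file do):
 */
#if 0
static void example_walk_metadata_list(transaction_t *transaction)
{
	struct journal_head *jh = transaction->t_buffers;

	if (!jh)
		return;
	do {
		JBUFFER_TRACE(jh, "on t_buffers");
		jh = jh->b_tnext;
	} while (jh != transaction->t_buffers);
}
#endif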

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
 * Generally the caller needs to re-read the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
			/* A written-back ordered data buffer */
			JBUFFER_TRACE(jh, "release data");
			__jbd2_journal_unfile_buffer(jh);
			jbd2_journal_remove_journal_head(bh);
			__brelse(bh);
		}
	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
		/* written-back checkpointed metadata buffer */
		if (jh->b_jlist == BJ_None) {
			JBUFFER_TRACE(jh, "remove from checkpoint list");
			__jbd2_journal_remove_checkpoint(jh);
			jbd2_journal_remove_journal_head(bh);
			__brelse(bh);
		}
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}


/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: page to try and free
 * @unused_gfp_mask: unused
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * __journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t unused_gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_remove_journal_head() and
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);
	ret = try_to_free_buffers(page);
busy:
	return ret;
}
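
/*
 * Sketch of how an address_space ->releasepage hook might drive the
 * function above (illustrative only, kept under #if 0; the caller and
 * the example_get_journal() helper are hypothetical, loosely modelled
 * on what an ext4-style filesystem would do):
 */
#if 0
static int example_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = example_get_journal(page->mapping->host);

	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}
#endif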

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	__jbd2_journal_unfile_buffer(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		clear_buffer_jbddirty(bh);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		jbd2_journal_remove_journal_head(bh);
		__brelse(bh);
	}
	return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;
	int ret;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	spin_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			ret = __dispose_buffer(jh,
					journal->j_running_transaction);
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			spin_unlock(&journal->j_state_lock);
			return ret;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				ret = __dispose_buffer(jh,
					journal->j_committing_transaction);
				jbd2_journal_put_journal_head(jh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				spin_unlock(&journal->j_state_lock);
				return ret;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		if (jh->b_jlist == BJ_Locked) {
			/*
			 * The buffer is on the committing transaction's locked
			 * list.  We have the buffer locked, so I/O has
			 * completed.  So we can nail the buffer now.
			 */
			may_free = __dispose_buffer(jh, transaction);
			goto zap_buffer;
		}
		/*
		 * If it is committing, we simply cannot touch it.  We
		 * can remove its next_transaction pointer from the
		 * running transaction if that is set, but nothing
		 * else. */
		set_buffer_freed(bh);
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction ==
					journal->j_running_transaction);
			jh->b_next_transaction = NULL;
		}
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		spin_unlock(&journal->j_state_lock);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	return may_free;
}

/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 *
 * Reap page buffers containing data after offset in page.
 *
 */
void jbd2_journal_invalidatepage(journal_t *journal,
		      struct page *page,
		      unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	int may_free = 1;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			may_free &= journal_unmap_buffer(journal, bh);
			unlock_buffer(bh);
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!offset) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
}
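
/*
 * Sketch of the matching ->invalidatepage hook (illustrative only, kept
 * under #if 0; example_get_journal() is again a hypothetical helper):
 */
#if 0
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = example_get_journal(page->mapping->host);

	jbd2_journal_invalidatepage(journal, page, offset);
}
#endif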

/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	/* The following list of buffer states needs to be consistent
	 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
	 * state. */

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under journal->j_list_lock
 *
 * Called under jbd_lock_bh_state(jh2bh(jh))
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	__jbd2_journal_file_buffer(jh, jh->b_transaction,
				was_dirty ? BJ_Metadata : BJ_Reserved);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __jbd2_journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	jbd2_journal_remove_journal_head(bh);

	spin_unlock(&journal->j_list_lock);
	__brelse(bh);
}